2024-11-15 22:35:42,559 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-15 22:35:42,571 main DEBUG Took 0.009867 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-15 22:35:42,572 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-15 22:35:42,573 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-15 22:35:42,574 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-15 22:35:42,575 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 22:35:42,583 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-15 22:35:42,597 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 22:35:42,599 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 22:35:42,600 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 22:35:42,601 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 22:35:42,601 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 22:35:42,602 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 22:35:42,603 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 22:35:42,603 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 22:35:42,604 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 22:35:42,604 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 22:35:42,605 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 22:35:42,605 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 22:35:42,606 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 22:35:42,607 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-15 22:35:42,607 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 22:35:42,608 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 22:35:42,608 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 22:35:42,609 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 22:35:42,609 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 22:35:42,610 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 22:35:42,610 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 22:35:42,610 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 22:35:42,611 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 22:35:42,611 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 22:35:42,612 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 22:35:42,613 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-15 22:35:42,615 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 22:35:42,616 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-15 22:35:42,619 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-15 22:35:42,619 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-15 22:35:42,621 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-15 22:35:42,622 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-15 22:35:42,631 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-15 22:35:42,634 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-15 22:35:42,636 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-15 22:35:42,636 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-15 22:35:42,637 main DEBUG createAppenders(={Console}) 2024-11-15 22:35:42,637 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-15 22:35:42,638 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-15 22:35:42,638 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-15 22:35:42,639 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-15 22:35:42,639 main DEBUG OutputStream closed 2024-11-15 22:35:42,640 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-15 22:35:42,640 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-15 22:35:42,641 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-15 22:35:42,725 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-15 22:35:42,728 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-15 22:35:42,729 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-15 22:35:42,730 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-15 22:35:42,731 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-15 22:35:42,732 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-15 22:35:42,732 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-15 22:35:42,733 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-15 22:35:42,733 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-15 22:35:42,733 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-15 22:35:42,734 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-15 22:35:42,734 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-15 22:35:42,735 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-15 22:35:42,735 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-15 22:35:42,735 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-15 22:35:42,736 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-15 22:35:42,736 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-15 22:35:42,737 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-15 22:35:42,739 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-15 22:35:42,739 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-15 22:35:42,739 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-15 22:35:42,740 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-15T22:35:42,987 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205 2024-11-15 22:35:42,990 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-15 22:35:42,990 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-15T22:35:42,998 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-15T22:35:43,035 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=246, ProcessCount=11, AvailableMemoryMB=5305 2024-11-15T22:35:43,039 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T22:35:43,059 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/cluster_6e8fd9e5-47e5-4c48-2126-63d8c2abcff2, deleteOnExit=true 2024-11-15T22:35:43,059 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T22:35:43,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/test.cache.data in system properties and HBase conf 2024-11-15T22:35:43,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T22:35:43,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/hadoop.log.dir in system properties and HBase conf 2024-11-15T22:35:43,063 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T22:35:43,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T22:35:43,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T22:35:43,171 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-15T22:35:43,262 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-15T22:35:43,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T22:35:43,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T22:35:43,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T22:35:43,269 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T22:35:43,269 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T22:35:43,270 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T22:35:43,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T22:35:43,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T22:35:43,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T22:35:43,273 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/nfs.dump.dir in system properties and HBase conf 2024-11-15T22:35:43,273 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/java.io.tmpdir in system properties and HBase conf 2024-11-15T22:35:43,274 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T22:35:43,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T22:35:43,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T22:35:43,705 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T22:35:44,345 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-15T22:35:44,411 INFO [Time-limited test {}] log.Log(170): Logging initialized @2568ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-15T22:35:44,475 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:35:44,537 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:35:44,557 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:35:44,557 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:35:44,558 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T22:35:44,570 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:35:44,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:35:44,574 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:35:44,740 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/java.io.tmpdir/jetty-localhost-42219-hadoop-hdfs-3_4_1-tests_jar-_-any-18326089990718003313/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T22:35:44,745 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:42219} 2024-11-15T22:35:44,746 INFO [Time-limited test {}] server.Server(415): Started @2903ms 2024-11-15T22:35:44,773 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T22:35:45,400 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:35:45,408 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:35:45,409 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:35:45,409 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:35:45,409 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T22:35:45,410 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:35:45,411 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:35:45,512 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/java.io.tmpdir/jetty-localhost-39027-hadoop-hdfs-3_4_1-tests_jar-_-any-1597434617216984527/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:35:45,513 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:39027} 2024-11-15T22:35:45,514 INFO [Time-limited test {}] server.Server(415): Started @3671ms 2024-11-15T22:35:45,565 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:35:45,668 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:35:45,673 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:35:45,674 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:35:45,674 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:35:45,675 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T22:35:45,676 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:35:45,676 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:35:45,778 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/java.io.tmpdir/jetty-localhost-34251-hadoop-hdfs-3_4_1-tests_jar-_-any-1560443346119711273/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:35:45,779 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:34251} 2024-11-15T22:35:45,779 INFO [Time-limited test {}] server.Server(415): Started @3937ms 2024-11-15T22:35:45,781 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-15T22:35:47,051 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/cluster_6e8fd9e5-47e5-4c48-2126-63d8c2abcff2/data/data2/current/BP-1037677707-172.17.0.3-1731710143782/current, will proceed with Du for space computation calculation, 2024-11-15T22:35:47,051 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/cluster_6e8fd9e5-47e5-4c48-2126-63d8c2abcff2/data/data4/current/BP-1037677707-172.17.0.3-1731710143782/current, will proceed with Du for space computation calculation, 2024-11-15T22:35:47,051 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/cluster_6e8fd9e5-47e5-4c48-2126-63d8c2abcff2/data/data3/current/BP-1037677707-172.17.0.3-1731710143782/current, will proceed with Du for space computation calculation, 2024-11-15T22:35:47,051 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/cluster_6e8fd9e5-47e5-4c48-2126-63d8c2abcff2/data/data1/current/BP-1037677707-172.17.0.3-1731710143782/current, will proceed with Du for space computation calculation, 2024-11-15T22:35:47,084 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T22:35:47,085 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T22:35:47,143 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x63f07fd6628dcf81 with lease ID 0x9e6484761f257a6b: Processing first storage report for DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f from datanode DatanodeRegistration(127.0.0.1:41735, datanodeUuid=adc56cfe-08ee-46dc-938b-64638fc7a83d, infoPort=37393, infoSecurePort=0, ipcPort=44559, storageInfo=lv=-57;cid=testClusterID;nsid=1235081871;c=1731710143782) 2024-11-15T22:35:47,144 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x63f07fd6628dcf81 with lease ID 0x9e6484761f257a6b: from storage DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f node DatanodeRegistration(127.0.0.1:41735, datanodeUuid=adc56cfe-08ee-46dc-938b-64638fc7a83d, infoPort=37393, infoSecurePort=0, ipcPort=44559, storageInfo=lv=-57;cid=testClusterID;nsid=1235081871;c=1731710143782), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T22:35:47,145 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9151ae870448c34b with lease ID 0x9e6484761f257a6c: Processing first storage report for DS-40d7c125-64ec-4200-9d7b-591a21aa6e88 from datanode DatanodeRegistration(127.0.0.1:46469, datanodeUuid=cac71d14-5e07-4464-b117-eddbd54b84c8, infoPort=34389, infoSecurePort=0, ipcPort=45799, storageInfo=lv=-57;cid=testClusterID;nsid=1235081871;c=1731710143782) 2024-11-15T22:35:47,145 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9151ae870448c34b with lease ID 0x9e6484761f257a6c: from storage DS-40d7c125-64ec-4200-9d7b-591a21aa6e88 node DatanodeRegistration(127.0.0.1:46469, datanodeUuid=cac71d14-5e07-4464-b117-eddbd54b84c8, infoPort=34389, infoSecurePort=0, ipcPort=45799, storageInfo=lv=-57;cid=testClusterID;nsid=1235081871;c=1731710143782), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T22:35:47,145 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x63f07fd6628dcf81 with lease ID 0x9e6484761f257a6b: Processing first storage report for DS-05d8c4d2-0ff6-403c-9ba7-3359dfc053fc from datanode DatanodeRegistration(127.0.0.1:41735, datanodeUuid=adc56cfe-08ee-46dc-938b-64638fc7a83d, infoPort=37393, infoSecurePort=0, ipcPort=44559, storageInfo=lv=-57;cid=testClusterID;nsid=1235081871;c=1731710143782) 2024-11-15T22:35:47,145 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x63f07fd6628dcf81 with lease ID 0x9e6484761f257a6b: from storage DS-05d8c4d2-0ff6-403c-9ba7-3359dfc053fc node DatanodeRegistration(127.0.0.1:41735, datanodeUuid=adc56cfe-08ee-46dc-938b-64638fc7a83d, infoPort=37393, infoSecurePort=0, ipcPort=44559, storageInfo=lv=-57;cid=testClusterID;nsid=1235081871;c=1731710143782), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:35:47,146 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9151ae870448c34b with lease ID 0x9e6484761f257a6c: Processing first storage report for DS-dd1166de-7cea-4fe5-98d6-ece4d7955975 from datanode DatanodeRegistration(127.0.0.1:46469, datanodeUuid=cac71d14-5e07-4464-b117-eddbd54b84c8, infoPort=34389, infoSecurePort=0, ipcPort=45799, storageInfo=lv=-57;cid=testClusterID;nsid=1235081871;c=1731710143782) 2024-11-15T22:35:47,146 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x9151ae870448c34b with lease ID 0x9e6484761f257a6c: from storage DS-dd1166de-7cea-4fe5-98d6-ece4d7955975 node DatanodeRegistration(127.0.0.1:46469, datanodeUuid=cac71d14-5e07-4464-b117-eddbd54b84c8, infoPort=34389, infoSecurePort=0, ipcPort=45799, storageInfo=lv=-57;cid=testClusterID;nsid=1235081871;c=1731710143782), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:35:47,194 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205 2024-11-15T22:35:47,257 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/cluster_6e8fd9e5-47e5-4c48-2126-63d8c2abcff2/zookeeper_0, clientPort=59677, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/cluster_6e8fd9e5-47e5-4c48-2126-63d8c2abcff2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/cluster_6e8fd9e5-47e5-4c48-2126-63d8c2abcff2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T22:35:47,267 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59677 2024-11-15T22:35:47,282 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:35:47,285 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:35:47,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741825_1001 (size=7) 2024-11-15T22:35:47,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741825_1001 (size=7) 2024-11-15T22:35:47,895 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9 with version=8 2024-11-15T22:35:47,895 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/hbase-staging 2024-11-15T22:35:47,974 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-15T22:35:48,213 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e611192d6313:0 server-side Connection retries=45 2024-11-15T22:35:48,222 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:35:48,223 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T22:35:48,227 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T22:35:48,227 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:35:48,227 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T22:35:48,352 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T22:35:48,405 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-15T22:35:48,414 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-15T22:35:48,417 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T22:35:48,439 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 101283 (auto-detected) 2024-11-15T22:35:48,440 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-15T22:35:48,462 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34701 2024-11-15T22:35:48,486 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34701 connecting to ZooKeeper ensemble=127.0.0.1:59677 2024-11-15T22:35:48,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:347010x0, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T22:35:48,618 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34701-0x10140a3d6530000 connected 2024-11-15T22:35:48,699 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:35:48,702 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:35:48,713 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:35:48,717 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9, hbase.cluster.distributed=false 2024-11-15T22:35:48,740 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T22:35:48,746 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34701 2024-11-15T22:35:48,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34701 2024-11-15T22:35:48,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34701 2024-11-15T22:35:48,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34701 2024-11-15T22:35:48,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34701 2024-11-15T22:35:48,851 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e611192d6313:0 server-side Connection retries=45 2024-11-15T22:35:48,853 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:35:48,853 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T22:35:48,854 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T22:35:48,854 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:35:48,854 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T22:35:48,857 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T22:35:48,859 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T22:35:48,860 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38539 2024-11-15T22:35:48,862 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38539 connecting to ZooKeeper ensemble=127.0.0.1:59677 2024-11-15T22:35:48,863 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:35:48,867 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:35:48,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:385390x0, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T22:35:48,887 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:385390x0, quorum=127.0.0.1:59677, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:35:48,887 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38539-0x10140a3d6530001 connected 2024-11-15T22:35:48,891 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T22:35:48,898 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T22:35:48,900 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T22:35:48,907 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T22:35:48,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38539 2024-11-15T22:35:48,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38539 2024-11-15T22:35:48,911 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38539 2024-11-15T22:35:48,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38539 2024-11-15T22:35:48,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38539 2024-11-15T22:35:48,932 DEBUG [M:0;e611192d6313:34701 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e611192d6313:34701 2024-11-15T22:35:48,933 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e611192d6313,34701,1731710148064 2024-11-15T22:35:48,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:35:48,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:35:48,952 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e611192d6313,34701,1731710148064 2024-11-15T22:35:48,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:35:48,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T22:35:48,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-15T22:35:48,983 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T22:35:48,984 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e611192d6313,34701,1731710148064 from backup master directory 2024-11-15T22:35:48,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e611192d6313,34701,1731710148064 2024-11-15T22:35:48,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:35:48,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:35:48,992 WARN [master/e611192d6313:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T22:35:48,992 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e611192d6313,34701,1731710148064 2024-11-15T22:35:48,995 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-15T22:35:48,996 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-15T22:35:49,050 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/hbase.id] with ID: 671d03b2-de3c-422b-8db0-924ba11b71ef 2024-11-15T22:35:49,050 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/.tmp/hbase.id 2024-11-15T22:35:49,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741826_1002 (size=42) 2024-11-15T22:35:49,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741826_1002 (size=42) 2024-11-15T22:35:49,065 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/.tmp/hbase.id]:[hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/hbase.id] 2024-11-15T22:35:49,109 INFO [master/e611192d6313:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:35:49,114 INFO [master/e611192d6313:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T22:35:49,132 INFO [master/e611192d6313:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-15T22:35:49,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:35:49,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:35:49,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741827_1003 (size=196) 2024-11-15T22:35:49,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741827_1003 (size=196) 2024-11-15T22:35:49,168 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T22:35:49,170 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T22:35:49,175 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:35:49,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741828_1004 (size=1189) 2024-11-15T22:35:49,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741828_1004 (size=1189) 2024-11-15T22:35:49,220 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store 2024-11-15T22:35:49,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741829_1005 (size=34) 2024-11-15T22:35:49,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741829_1005 (size=34) 2024-11-15T22:35:49,242 INFO [master/e611192d6313:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-15T22:35:49,245 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:35:49,246 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T22:35:49,246 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:35:49,247 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:35:49,248 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T22:35:49,248 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:35:49,248 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T22:35:49,249 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731710149246Disabling compacts and flushes for region at 1731710149246Disabling writes for close at 1731710149248 (+2 ms)Writing region close event to WAL at 1731710149248Closed at 1731710149248 2024-11-15T22:35:49,252 WARN [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/.initializing 2024-11-15T22:35:49,252 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/WALs/e611192d6313,34701,1731710148064 2024-11-15T22:35:49,271 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C34701%2C1731710148064, suffix=, logDir=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/WALs/e611192d6313,34701,1731710148064, archiveDir=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/oldWALs, maxLogs=10 2024-11-15T22:35:49,279 INFO [master/e611192d6313:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C34701%2C1731710148064.1731710149275 2024-11-15T22:35:49,296 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/WALs/e611192d6313,34701,1731710148064/e611192d6313%2C34701%2C1731710148064.1731710149275 2024-11-15T22:35:49,302 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37393:37393),(127.0.0.1/127.0.0.1:34389:34389)] 2024-11-15T22:35:49,303 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:35:49,304 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:35:49,307 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:35:49,308 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:35:49,343 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:35:49,368 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T22:35:49,372 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:35:49,374 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:35:49,374 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:35:49,377 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T22:35:49,378 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:35:49,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:35:49,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:35:49,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T22:35:49,383 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:35:49,384 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:35:49,384 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:35:49,386 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T22:35:49,387 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:35:49,387 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:35:49,388 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:35:49,392 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:35:49,393 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:35:49,400 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:35:49,400 DEBUG [master/e611192d6313:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:35:49,403 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T22:35:49,407 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:35:49,411 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:35:49,413 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847242, jitterRate=0.07732413709163666}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T22:35:49,420 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731710149318Initializing all the Stores at 1731710149320 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710149320Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710149321 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710149321Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710149321Cleaning up temporary data from old regions at 1731710149400 (+79 ms)Region opened successfully at 1731710149420 (+20 ms) 2024-11-15T22:35:49,421 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T22:35:49,452 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33ac49cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e611192d6313/172.17.0.3:0 2024-11-15T22:35:49,480 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T22:35:49,489 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T22:35:49,490 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T22:35:49,492 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T22:35:49,493 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-15T22:35:49,498 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-15T22:35:49,498 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T22:35:49,526 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T22:35:49,535 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T22:35:49,581 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T22:35:49,584 INFO [master/e611192d6313:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T22:35:49,587 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T22:35:49,591 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T22:35:49,594 INFO [master/e611192d6313:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T22:35:49,598 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T22:35:49,602 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T22:35:49,603 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T22:35:49,612 DEBUG 
[master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T22:35:49,631 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T22:35:49,643 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T22:35:49,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T22:35:49,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T22:35:49,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:35:49,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:35:49,659 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e611192d6313,34701,1731710148064, sessionid=0x10140a3d6530000, setting cluster-up flag (Was=false) 2024-11-15T22:35:49,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:35:49,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:35:49,718 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T22:35:49,721 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e611192d6313,34701,1731710148064 2024-11-15T22:35:49,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:35:49,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:35:49,781 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T22:35:49,784 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e611192d6313,34701,1731710148064 2024-11-15T22:35:49,794 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T22:35:49,817 INFO [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer(746): ClusterId : 671d03b2-de3c-422b-8db0-924ba11b71ef 2024-11-15T22:35:49,819 DEBUG [RS:0;e611192d6313:38539 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T22:35:49,835 DEBUG [RS:0;e611192d6313:38539 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T22:35:49,835 DEBUG [RS:0;e611192d6313:38539 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T22:35:49,845 DEBUG [RS:0;e611192d6313:38539 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T22:35:49,846 DEBUG [RS:0;e611192d6313:38539 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@753031b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e611192d6313/172.17.0.3:0 2024-11-15T22:35:49,858 DEBUG [RS:0;e611192d6313:38539 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e611192d6313:38539 2024-11-15T22:35:49,861 INFO [RS:0;e611192d6313:38539 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T22:35:49,861 INFO [RS:0;e611192d6313:38539 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T22:35:49,861 DEBUG [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-15T22:35:49,863 INFO [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer(2659): reportForDuty to master=e611192d6313,34701,1731710148064 with port=38539, startcode=1731710148816 2024-11-15T22:35:49,867 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T22:35:49,872 DEBUG [RS:0;e611192d6313:38539 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T22:35:49,876 INFO [master/e611192d6313:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T22:35:49,882 INFO [master/e611192d6313:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-15T22:35:49,887 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e611192d6313,34701,1731710148064 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T22:35:49,894 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:35:49,894 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:35:49,894 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:35:49,894 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:35:49,895 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e611192d6313:0, corePoolSize=10, maxPoolSize=10 2024-11-15T22:35:49,895 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:35:49,895 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e611192d6313:0, corePoolSize=2, maxPoolSize=2 2024-11-15T22:35:49,895 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:35:49,900 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:35:49,900 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T22:35:49,902 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731710179902 2024-11-15T22:35:49,904 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T22:35:49,905 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T22:35:49,907 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:35:49,907 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T22:35:49,909 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T22:35:49,909 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T22:35:49,910 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T22:35:49,910 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T22:35:49,911 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-15T22:35:49,919 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T22:35:49,920 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T22:35:49,921 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T22:35:49,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741831_1007 (size=1321) 2024-11-15T22:35:49,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741831_1007 (size=1321) 2024-11-15T22:35:49,927 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T22:35:49,927 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9 2024-11-15T22:35:49,928 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T22:35:49,928 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T22:35:49,932 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710149929,5,FailOnTimeoutGroup] 2024-11-15T22:35:49,933 DEBUG 
[master/e611192d6313:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710149932,5,FailOnTimeoutGroup] 2024-11-15T22:35:49,933 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T22:35:49,933 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T22:35:49,934 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T22:35:49,935 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-15T22:35:49,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741832_1008 (size=32) 2024-11-15T22:35:49,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741832_1008 (size=32) 2024-11-15T22:35:49,944 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53961, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T22:35:49,946 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:35:49,950 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34701 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e611192d6313,38539,1731710148816 2024-11-15T22:35:49,951 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T22:35:49,953 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34701 {}] master.ServerManager(517): Registering regionserver=e611192d6313,38539,1731710148816 2024-11-15T22:35:49,954 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T22:35:49,954 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:35:49,955 
INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:35:49,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T22:35:49,958 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T22:35:49,959 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:35:49,960 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:35:49,960 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T22:35:49,963 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T22:35:49,964 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:35:49,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:35:49,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T22:35:49,968 DEBUG [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9 2024-11-15T22:35:49,968 DEBUG [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39371 2024-11-15T22:35:49,968 DEBUG [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T22:35:49,968 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T22:35:49,969 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:35:49,970 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:35:49,970 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T22:35:49,972 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740 2024-11-15T22:35:49,972 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740 2024-11-15T22:35:49,975 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T22:35:49,975 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T22:35:49,976 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-15T22:35:49,978 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T22:35:49,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T22:35:49,982 DEBUG [RS:0;e611192d6313:38539 {}] zookeeper.ZKUtil(111): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e611192d6313,38539,1731710148816 2024-11-15T22:35:49,982 WARN [RS:0;e611192d6313:38539 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T22:35:49,982 INFO [RS:0;e611192d6313:38539 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:35:49,982 DEBUG [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816 2024-11-15T22:35:49,983 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:35:49,984 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708843, jitterRate=-0.09865975379943848}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T22:35:49,986 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e611192d6313,38539,1731710148816] 2024-11-15T22:35:49,989 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731710149948Initializing all the Stores at 1731710149950 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710149950Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710149950Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710149950Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710149950Cleaning up temporary data from old regions at 1731710149975 (+25 ms)Region opened successfully at 1731710149989 (+14 ms) 2024-11-15T22:35:49,989 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T22:35:49,989 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T22:35:49,989 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T22:35:49,990 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T22:35:49,990 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T22:35:49,991 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T22:35:49,991 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731710149989Disabling compacts and flushes for region at 1731710149989Disabling writes for close at 1731710149990 (+1 ms)Writing region close event to WAL at 1731710149991 (+1 ms)Closed at 1731710149991 2024-11-15T22:35:49,994 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:35:49,995 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T22:35:50,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T22:35:50,006 INFO [RS:0;e611192d6313:38539 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T22:35:50,009 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T22:35:50,012 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T22:35:50,021 INFO [RS:0;e611192d6313:38539 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T22:35:50,025 INFO [RS:0;e611192d6313:38539 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T22:35:50,025 INFO [RS:0;e611192d6313:38539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-15T22:35:50,026 INFO [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T22:35:50,030 INFO [RS:0;e611192d6313:38539 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T22:35:50,032 INFO [RS:0;e611192d6313:38539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T22:35:50,032 DEBUG [RS:0;e611192d6313:38539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:35:50,032 DEBUG [RS:0;e611192d6313:38539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:35:50,032 DEBUG [RS:0;e611192d6313:38539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:35:50,032 DEBUG [RS:0;e611192d6313:38539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:35:50,032 DEBUG [RS:0;e611192d6313:38539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:35:50,033 DEBUG [RS:0;e611192d6313:38539 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e611192d6313:0, corePoolSize=2, maxPoolSize=2 2024-11-15T22:35:50,033 DEBUG [RS:0;e611192d6313:38539 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:35:50,033 DEBUG [RS:0;e611192d6313:38539 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:35:50,033 DEBUG [RS:0;e611192d6313:38539 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:35:50,033 DEBUG [RS:0;e611192d6313:38539 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:35:50,033 DEBUG [RS:0;e611192d6313:38539 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:35:50,033 DEBUG [RS:0;e611192d6313:38539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:35:50,033 DEBUG [RS:0;e611192d6313:38539 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e611192d6313:0, corePoolSize=3, maxPoolSize=3 2024-11-15T22:35:50,034 DEBUG [RS:0;e611192d6313:38539 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0, corePoolSize=3, maxPoolSize=3 2024-11-15T22:35:50,034 INFO [RS:0;e611192d6313:38539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-15T22:35:50,035 INFO [RS:0;e611192d6313:38539 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T22:35:50,035 INFO [RS:0;e611192d6313:38539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:35:50,035 INFO [RS:0;e611192d6313:38539 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T22:35:50,035 INFO [RS:0;e611192d6313:38539 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T22:35:50,035 INFO [RS:0;e611192d6313:38539 {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,38539,1731710148816-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T22:35:50,053 INFO [RS:0;e611192d6313:38539 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T22:35:50,055 INFO [RS:0;e611192d6313:38539 {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,38539,1731710148816-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:35:50,055 INFO [RS:0;e611192d6313:38539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:35:50,055 INFO [RS:0;e611192d6313:38539 {}] regionserver.Replication(171): e611192d6313,38539,1731710148816 started 2024-11-15T22:35:50,071 INFO [RS:0;e611192d6313:38539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:35:50,072 INFO [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer(1482): Serving as e611192d6313,38539,1731710148816, RpcServer on e611192d6313/172.17.0.3:38539, sessionid=0x10140a3d6530001 2024-11-15T22:35:50,072 DEBUG [RS:0;e611192d6313:38539 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T22:35:50,072 DEBUG [RS:0;e611192d6313:38539 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e611192d6313,38539,1731710148816 2024-11-15T22:35:50,073 DEBUG [RS:0;e611192d6313:38539 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e611192d6313,38539,1731710148816' 2024-11-15T22:35:50,073 DEBUG [RS:0;e611192d6313:38539 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T22:35:50,074 DEBUG [RS:0;e611192d6313:38539 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T22:35:50,075 DEBUG [RS:0;e611192d6313:38539 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T22:35:50,075 DEBUG [RS:0;e611192d6313:38539 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T22:35:50,075 DEBUG [RS:0;e611192d6313:38539 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e611192d6313,38539,1731710148816 2024-11-15T22:35:50,075 DEBUG [RS:0;e611192d6313:38539 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e611192d6313,38539,1731710148816' 2024-11-15T22:35:50,075 DEBUG [RS:0;e611192d6313:38539 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T22:35:50,076 DEBUG 
[RS:0;e611192d6313:38539 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T22:35:50,077 DEBUG [RS:0;e611192d6313:38539 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T22:35:50,077 INFO [RS:0;e611192d6313:38539 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T22:35:50,077 INFO [RS:0;e611192d6313:38539 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T22:35:50,163 WARN [e611192d6313:34701 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-15T22:35:50,187 INFO [RS:0;e611192d6313:38539 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C38539%2C1731710148816, suffix=, logDir=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816, archiveDir=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/oldWALs, maxLogs=32 2024-11-15T22:35:50,190 INFO [RS:0;e611192d6313:38539 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C38539%2C1731710148816.1731710150190 2024-11-15T22:35:50,200 INFO [RS:0;e611192d6313:38539 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710150190 2024-11-15T22:35:50,202 DEBUG [RS:0;e611192d6313:38539 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34389:34389),(127.0.0.1/127.0.0.1:37393:37393)] 2024-11-15T22:35:50,419 DEBUG [e611192d6313:34701 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T22:35:50,432 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e611192d6313,38539,1731710148816 2024-11-15T22:35:50,438 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e611192d6313,38539,1731710148816, state=OPENING 2024-11-15T22:35:50,508 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T22:35:50,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:35:50,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:35:50,521 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:35:50,522 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:35:50,525 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T22:35:50,528 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e611192d6313,38539,1731710148816}] 2024-11-15T22:35:50,710 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T22:35:50,713 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54373, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T22:35:50,726 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T22:35:50,727 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:35:50,730 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C38539%2C1731710148816.meta, suffix=.meta, logDir=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816, archiveDir=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/oldWALs, maxLogs=32 2024-11-15T22:35:50,732 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C38539%2C1731710148816.meta.1731710150732.meta 2024-11-15T22:35:50,740 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.meta.1731710150732.meta 2024-11-15T22:35:50,743 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34389:34389),(127.0.0.1/127.0.0.1:37393:37393)] 2024-11-15T22:35:50,744 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:35:50,746 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T22:35:50,748 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T22:35:50,753 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-15T22:35:50,757 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T22:35:50,757 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:35:50,757 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T22:35:50,757 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T22:35:50,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T22:35:50,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T22:35:50,762 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:35:50,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:35:50,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T22:35:50,765 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T22:35:50,765 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:35:50,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:35:50,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T22:35:50,768 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T22:35:50,768 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:35:50,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:35:50,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T22:35:50,771 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T22:35:50,771 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:35:50,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-15T22:35:50,772 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T22:35:50,773 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740 2024-11-15T22:35:50,776 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740 2024-11-15T22:35:50,778 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T22:35:50,778 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T22:35:50,779 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T22:35:50,782 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T22:35:50,783 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=752445, jitterRate=-0.04321789741516113}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T22:35:50,783 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T22:35:50,785 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731710150758Writing region info on filesystem at 1731710150758Initializing all the Stores at 1731710150760 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710150760Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710150760Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710150760Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710150760Cleaning up temporary data from old regions at 1731710150778 (+18 ms)Running coprocessor post-open hooks at 1731710150783 (+5 ms)Region opened successfully at 1731710150785 (+2 ms) 2024-11-15T22:35:50,791 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731710150702 2024-11-15T22:35:50,802 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T22:35:50,803 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T22:35:50,804 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e611192d6313,38539,1731710148816 2024-11-15T22:35:50,806 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e611192d6313,38539,1731710148816, state=OPEN 2024-11-15T22:35:50,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T22:35:50,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T22:35:50,927 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:35:50,927 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:35:50,928 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e611192d6313,38539,1731710148816 2024-11-15T22:35:50,936 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T22:35:50,936 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e611192d6313,38539,1731710148816 in 400 msec 2024-11-15T22:35:50,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T22:35:50,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 938 msec 2024-11-15T22:35:50,945 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:35:50,945 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T22:35:50,963 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T22:35:50,964 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e611192d6313,38539,1731710148816, seqNum=-1] 2024-11-15T22:35:50,981 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T22:35:50,983 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46849, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T22:35:51,003 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1780 sec 2024-11-15T22:35:51,003 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731710151003, completionTime=-1 2024-11-15T22:35:51,007 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T22:35:51,007 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T22:35:51,030 INFO [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T22:35:51,030 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731710211030 2024-11-15T22:35:51,030 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731710271030 2024-11-15T22:35:51,030 INFO [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 22 msec 2024-11-15T22:35:51,032 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34701,1731710148064-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:35:51,033 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34701,1731710148064-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:35:51,033 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34701,1731710148064-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:35:51,034 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e611192d6313:34701, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T22:35:51,034 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T22:35:51,035 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T22:35:51,040 DEBUG [master/e611192d6313:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T22:35:51,058 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.065sec 2024-11-15T22:35:51,059 INFO [master/e611192d6313:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T22:35:51,061 INFO [master/e611192d6313:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T22:35:51,062 INFO [master/e611192d6313:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T22:35:51,062 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T22:35:51,062 INFO [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T22:35:51,063 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34701,1731710148064-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T22:35:51,064 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34701,1731710148064-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T22:35:51,073 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T22:35:51,074 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T22:35:51,074 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34701,1731710148064-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T22:35:51,147 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66cb686a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:35:51,150 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-15T22:35:51,150 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-15T22:35:51,154 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e611192d6313,34701,-1 for getting cluster id 2024-11-15T22:35:51,161 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T22:35:51,170 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '671d03b2-de3c-422b-8db0-924ba11b71ef' 2024-11-15T22:35:51,174 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T22:35:51,174 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "671d03b2-de3c-422b-8db0-924ba11b71ef" 2024-11-15T22:35:51,176 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d7dfff7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:35:51,176 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e611192d6313,34701,-1] 2024-11-15T22:35:51,178 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T22:35:51,180 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:35:51,181 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59264, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T22:35:51,184 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32fb6022, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:35:51,185 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T22:35:51,192 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e611192d6313,38539,1731710148816, seqNum=-1] 2024-11-15T22:35:51,192 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T22:35:51,195 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42390, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T22:35:51,218 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=e611192d6313,34701,1731710148064 2024-11-15T22:35:51,218 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:35:51,226 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T22:35:51,231 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T22:35:51,235 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is e611192d6313,34701,1731710148064 2024-11-15T22:35:51,237 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@47d6b2cf 2024-11-15T22:35:51,238 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T22:35:51,241 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59268, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T22:35:51,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T22:35:51,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-15T22:35:51,246 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T22:35:51,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-15T22:35:51,255 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T22:35:51,257 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-15T22:35:51,257 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:35:51,259 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T22:35:51,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T22:35:51,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741835_1011 (size=389) 2024-11-15T22:35:51,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741835_1011 (size=389) 2024-11-15T22:35:51,319 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3be2cba3fb7ac07a66be1d5545b3e83d, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9 2024-11-15T22:35:51,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741836_1012 (size=72) 2024-11-15T22:35:51,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741836_1012 (size=72) 2024-11-15T22:35:51,331 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:35:51,331 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 3be2cba3fb7ac07a66be1d5545b3e83d, disabling compactions & flushes 2024-11-15T22:35:51,331 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. 2024-11-15T22:35:51,331 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. 2024-11-15T22:35:51,331 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. after waiting 0 ms 2024-11-15T22:35:51,331 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. 2024-11-15T22:35:51,331 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. 2024-11-15T22:35:51,331 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3be2cba3fb7ac07a66be1d5545b3e83d: Waiting for close lock at 1731710151331Disabling compacts and flushes for region at 1731710151331Disabling writes for close at 1731710151331Writing region close event to WAL at 1731710151331Closed at 1731710151331 2024-11-15T22:35:51,333 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T22:35:51,337 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731710151333"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731710151333"}]},"ts":"1731710151333"} 2024-11-15T22:35:51,342 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-15T22:35:51,344 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T22:35:51,346 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731710151344"}]},"ts":"1731710151344"} 2024-11-15T22:35:51,351 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-15T22:35:51,352 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3be2cba3fb7ac07a66be1d5545b3e83d, ASSIGN}] 2024-11-15T22:35:51,355 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3be2cba3fb7ac07a66be1d5545b3e83d, ASSIGN 2024-11-15T22:35:51,356 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3be2cba3fb7ac07a66be1d5545b3e83d, ASSIGN; state=OFFLINE, location=e611192d6313,38539,1731710148816; forceNewPlan=false, retain=false 2024-11-15T22:35:51,509 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3be2cba3fb7ac07a66be1d5545b3e83d, regionState=OPENING, regionLocation=e611192d6313,38539,1731710148816 2024-11-15T22:35:51,513 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3be2cba3fb7ac07a66be1d5545b3e83d, ASSIGN because future has completed 2024-11-15T22:35:51,514 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3be2cba3fb7ac07a66be1d5545b3e83d, server=e611192d6313,38539,1731710148816}] 2024-11-15T22:35:51,685 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. 
2024-11-15T22:35:51,685 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3be2cba3fb7ac07a66be1d5545b3e83d, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d.', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:35:51,686 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:35:51,686 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:35:51,686 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:35:51,686 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:35:51,689 INFO [StoreOpener-3be2cba3fb7ac07a66be1d5545b3e83d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:35:51,691 INFO [StoreOpener-3be2cba3fb7ac07a66be1d5545b3e83d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3be2cba3fb7ac07a66be1d5545b3e83d columnFamilyName info 2024-11-15T22:35:51,691 DEBUG [StoreOpener-3be2cba3fb7ac07a66be1d5545b3e83d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:35:51,692 INFO [StoreOpener-3be2cba3fb7ac07a66be1d5545b3e83d-1 {}] regionserver.HStore(327): Store=3be2cba3fb7ac07a66be1d5545b3e83d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:35:51,693 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:35:51,694 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:35:51,695 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:35:51,696 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:35:51,696 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:35:51,698 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:35:51,702 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:35:51,703 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3be2cba3fb7ac07a66be1d5545b3e83d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=804731, jitterRate=0.0232694149017334}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T22:35:51,703 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:35:51,704 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3be2cba3fb7ac07a66be1d5545b3e83d: Running coprocessor pre-open hook at 1731710151686Writing region info on filesystem at 1731710151686Initializing all the Stores at 1731710151688 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710151688Cleaning up temporary data from old regions at 1731710151696 (+8 ms)Running coprocessor post-open hooks at 1731710151703 (+7 ms)Region opened successfully at 1731710151703 2024-11-15T22:35:51,705 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d., pid=6, masterSystemTime=1731710151669 2024-11-15T22:35:51,710 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. 2024-11-15T22:35:51,710 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. 2024-11-15T22:35:51,711 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3be2cba3fb7ac07a66be1d5545b3e83d, regionState=OPEN, openSeqNum=2, regionLocation=e611192d6313,38539,1731710148816 2024-11-15T22:35:51,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3be2cba3fb7ac07a66be1d5545b3e83d, server=e611192d6313,38539,1731710148816 because future has completed 2024-11-15T22:35:51,721 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T22:35:51,721 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3be2cba3fb7ac07a66be1d5545b3e83d, server=e611192d6313,38539,1731710148816 in 203 msec 2024-11-15T22:35:51,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T22:35:51,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=3be2cba3fb7ac07a66be1d5545b3e83d, ASSIGN in 369 msec 2024-11-15T22:35:51,726 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T22:35:51,727 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731710151727"}]},"ts":"1731710151727"} 2024-11-15T22:35:51,730 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-15T22:35:51,732 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T22:35:51,736 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 484 msec 2024-11-15T22:35:56,271 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-15T22:35:56,321 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T22:35:56,323 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-15T22:35:58,403 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T22:35:58,404 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T22:35:58,410 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-15T22:35:58,411 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-15T22:35:58,412 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T22:35:58,413 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-15T22:35:58,413 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T22:35:58,413 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-15T22:36:01,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34701 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T22:36:01,301 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-15T22:36:01,304 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-15T22:36:01,313 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-15T22:36:01,314 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. 
2024-11-15T22:36:01,315 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C38539%2C1731710148816.1731710161314 2024-11-15T22:36:01,324 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:01,324 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:01,324 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:01,325 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:01,325 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:01,325 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710150190 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710161314 2024-11-15T22:36:01,327 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34389:34389),(127.0.0.1/127.0.0.1:37393:37393)] 2024-11-15T22:36:01,327 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710150190 is not closed yet, will try archiving it next time 2024-11-15T22:36:01,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741833_1009 (size=451) 2024-11-15T22:36:01,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741833_1009 (size=451) 2024-11-15T22:36:01,331 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710150190 to hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/oldWALs/e611192d6313%2C38539%2C1731710148816.1731710150190 2024-11-15T22:36:01,338 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d., hostname=e611192d6313,38539,1731710148816, seqNum=2] 2024-11-15T22:36:13,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38539 {}] regionserver.HRegion(8855): Flush requested on 3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:36:13,403 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3be2cba3fb7ac07a66be1d5545b3e83d 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T22:36:13,460 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/eb2ab5ea96f042cbb1fe1399c899ab0c is 1080, key is row0001/info:/1731710161341/Put/seqid=0 2024-11-15T22:36:13,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741838_1014 (size=12509) 2024-11-15T22:36:13,470 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741838_1014 (size=12509) 2024-11-15T22:36:13,471 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/eb2ab5ea96f042cbb1fe1399c899ab0c 2024-11-15T22:36:13,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/eb2ab5ea96f042cbb1fe1399c899ab0c as hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/eb2ab5ea96f042cbb1fe1399c899ab0c 2024-11-15T22:36:13,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/eb2ab5ea96f042cbb1fe1399c899ab0c, entries=7, sequenceid=11, filesize=12.2 K 2024-11-15T22:36:13,531 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 3be2cba3fb7ac07a66be1d5545b3e83d in 131ms, sequenceid=11, compaction requested=false 2024-11-15T22:36:13,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3be2cba3fb7ac07a66be1d5545b3e83d: 2024-11-15T22:36:17,190 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-15T22:36:21,418 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C38539%2C1731710148816.1731710181417 2024-11-15T22:36:21,636 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 212 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:21,636 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:21,637 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:21,637 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:21,637 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:21,637 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:21,637 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710161314 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710181417 2024-11-15T22:36:21,640 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34389:34389),(127.0.0.1/127.0.0.1:37393:37393)] 2024-11-15T22:36:21,640 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710161314 is not closed yet, will try archiving it next time 2024-11-15T22:36:21,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741837_1013 (size=12399) 2024-11-15T22:36:21,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741837_1013 (size=12399) 2024-11-15T22:36:21,845 INFO [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:24,054 INFO [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:26,259 INFO [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:28,468 INFO [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:28,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38539 {}] regionserver.HRegion(8855): Flush requested on 3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:36:28,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3be2cba3fb7ac07a66be1d5545b3e83d 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T22:36:28,673 INFO [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:28,685 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/35c8c53a2d0c4aa1aa5a959604314fa3 is 1080, key is row0008/info:/1731710175400/Put/seqid=0 2024-11-15T22:36:28,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741840_1016 (size=12509) 2024-11-15T22:36:28,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741840_1016 (size=12509) 2024-11-15T22:36:28,743 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/35c8c53a2d0c4aa1aa5a959604314fa3 2024-11-15T22:36:28,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/35c8c53a2d0c4aa1aa5a959604314fa3 as hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/35c8c53a2d0c4aa1aa5a959604314fa3 2024-11-15T22:36:28,770 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/35c8c53a2d0c4aa1aa5a959604314fa3, entries=7, sequenceid=21, filesize=12.2 K 2024-11-15T22:36:28,972 INFO [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:28,972 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 3be2cba3fb7ac07a66be1d5545b3e83d in 
504ms, sequenceid=21, compaction requested=false 2024-11-15T22:36:28,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3be2cba3fb7ac07a66be1d5545b3e83d: 2024-11-15T22:36:28,973 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-15T22:36:28,973 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:36:28,974 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/eb2ab5ea96f042cbb1fe1399c899ab0c because midkey is the same as first or last row 2024-11-15T22:36:30,677 INFO [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:32,053 INFO [master/e611192d6313:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-15T22:36:32,053 INFO [master/e611192d6313:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-15T22:36:32,890 INFO [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(1368): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:32,892 WARN [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:32,893 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e611192d6313%2C38539%2C1731710148816:(num 1731710181417) roll requested 2024-11-15T22:36:32,893 INFO [regionserver/e611192d6313:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C38539%2C1731710148816.1731710192893 2024-11-15T22:36:33,106 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 211 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:33,107 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:33,107 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:33,107 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:33,107 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:33,108 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-15T22:36:33,108 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710181417 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710192893 2024-11-15T22:36:33,110 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37393:37393),(127.0.0.1/127.0.0.1:34389:34389)] 2024-11-15T22:36:33,110 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710181417 is not closed yet, will try archiving it next time 2024-11-15T22:36:33,110 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710161314 to hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/oldWALs/e611192d6313%2C38539%2C1731710148816.1731710161314 2024-11-15T22:36:33,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741839_1015 (size=7739) 2024-11-15T22:36:33,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741839_1015 (size=7739) 2024-11-15T22:36:35,095 INFO [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK], DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK]] 2024-11-15T22:36:36,686 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3be2cba3fb7ac07a66be1d5545b3e83d, had cached 0 bytes from a total of 25018 2024-11-15T22:36:37,301 INFO [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK], DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK]] 2024-11-15T22:36:39,510 INFO [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK], DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK]] 2024-11-15T22:36:41,717 INFO [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK], 
DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK]] 2024-11-15T22:36:43,721 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T22:36:43,721 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C38539%2C1731710148816.1731710203721 2024-11-15T22:36:47,191 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T22:36:48,737 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5012 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK], DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK]] 2024-11-15T22:36:48,740 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5012 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK], DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK]] 2024-11-15T22:36:48,740 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e611192d6313%2C38539%2C1731710148816:(num 1731710203721) roll requested 2024-11-15T22:36:48,741 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:48,741 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:48,741 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:48,741 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:48,741 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:48,742 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710192893 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710203721 2024-11-15T22:36:48,743 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34389:34389),(127.0.0.1/127.0.0.1:37393:37393)] 2024-11-15T22:36:48,743 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710192893 is not closed yet, will try archiving it next time 2024-11-15T22:36:48,743 INFO [regionserver/e611192d6313:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C38539%2C1731710148816.1731710208743 2024-11-15T22:36:48,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741841_1017 (size=4753) 2024-11-15T22:36:48,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741841_1017 (size=4753) 2024-11-15T22:36:53,749 INFO [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5003 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:53,749 WARN [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5003 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:53,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38539 {}] regionserver.HRegion(8855): Flush requested on 3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:36:53,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3be2cba3fb7ac07a66be1d5545b3e83d 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T22:36:53,756 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5009 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:53,756 WARN [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5009 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:55,752 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T22:36:58,753 INFO [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:58,753 WARN [FSHLog-0-hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9-prefix:e611192d6313,38539,1731710148816 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46469,DS-40d7c125-64ec-4200-9d7b-591a21aa6e88,DISK], DatanodeInfoWithStorage[127.0.0.1:41735,DS-cf603ce9-e51f-4be0-883e-8fff5c32ba0f,DISK]] 2024-11-15T22:36:58,753 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:58,753 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:58,753 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:58,754 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:58,754 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:58,754 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710203721 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710208743 2024-11-15T22:36:58,756 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34389:34389),(127.0.0.1/127.0.0.1:37393:37393)] 2024-11-15T22:36:58,756 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710203721 is not closed yet, will try archiving it next time 2024-11-15T22:36:58,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741842_1018 (size=1569) 2024-11-15T22:36:58,756 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e611192d6313%2C38539%2C1731710148816:(num 1731710208743) roll requested 2024-11-15T22:36:58,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741842_1018 (size=1569) 2024-11-15T22:36:58,757 INFO [regionserver/e611192d6313:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C38539%2C1731710148816.1731710218756 2024-11-15T22:36:58,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/d73b6dbb9d2b4c7bad06f99b1c94293c is 1080, key is row0015/info:/1731710190473/Put/seqid=0 2024-11-15T22:36:58,765 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:58,765 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:58,766 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:58,766 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:58,766 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:58,766 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710208743 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710218756 2024-11-15T22:36:58,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741845_1021 (size=12509) 2024-11-15T22:36:58,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741843_1019 (size=93) 2024-11-15T22:36:58,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741843_1019 (size=93) 2024-11-15T22:36:58,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741845_1021 (size=12509) 2024-11-15T22:36:58,769 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), 
to=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/d73b6dbb9d2b4c7bad06f99b1c94293c 2024-11-15T22:36:58,769 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710208743 to hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/oldWALs/e611192d6313%2C38539%2C1731710148816.1731710208743 2024-11-15T22:36:58,776 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34389:34389),(127.0.0.1/127.0.0.1:37393:37393)] 2024-11-15T22:36:58,777 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C38539%2C1731710148816.1731710218776 2024-11-15T22:36:58,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/d73b6dbb9d2b4c7bad06f99b1c94293c as hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/d73b6dbb9d2b4c7bad06f99b1c94293c 2024-11-15T22:36:58,784 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:58,785 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:58,785 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:58,785 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:58,785 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:36:58,785 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710218756 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710218776 2024-11-15T22:36:58,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741844_1020 (size=93) 2024-11-15T22:36:58,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741844_1020 (size=93) 2024-11-15T22:36:58,788 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37393:37393),(127.0.0.1/127.0.0.1:34389:34389)] 2024-11-15T22:36:58,788 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/WALs/e611192d6313,38539,1731710148816/e611192d6313%2C38539%2C1731710148816.1731710218756 to hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/oldWALs/e611192d6313%2C38539%2C1731710148816.1731710218756 2024-11-15T22:36:58,794 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/d73b6dbb9d2b4c7bad06f99b1c94293c, entries=7, sequenceid=31, filesize=12.2 K 2024-11-15T22:36:58,796 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=1.05 KB/1076 for 3be2cba3fb7ac07a66be1d5545b3e83d in 5047ms, sequenceid=31, compaction requested=true 2024-11-15T22:36:58,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3be2cba3fb7ac07a66be1d5545b3e83d: 2024-11-15T22:36:58,796 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-15T22:36:58,796 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:36:58,797 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/eb2ab5ea96f042cbb1fe1399c899ab0c because midkey is the same as first or last row 2024-11-15T22:36:58,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3be2cba3fb7ac07a66be1d5545b3e83d:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T22:36:58,800 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T22:36:58,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:36:58,802 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T22:36:58,804 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.HStore(1541): 3be2cba3fb7ac07a66be1d5545b3e83d/info is initiating minor compaction (all files) 2024-11-15T22:36:58,804 INFO [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3be2cba3fb7ac07a66be1d5545b3e83d/info in TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. 
2024-11-15T22:36:58,804 INFO [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/eb2ab5ea96f042cbb1fe1399c899ab0c, hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/35c8c53a2d0c4aa1aa5a959604314fa3, hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/d73b6dbb9d2b4c7bad06f99b1c94293c] into tmpdir=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp, totalSize=36.6 K 2024-11-15T22:36:58,805 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] compactions.Compactor(225): Compacting eb2ab5ea96f042cbb1fe1399c899ab0c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731710161341 2024-11-15T22:36:58,806 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] compactions.Compactor(225): Compacting 35c8c53a2d0c4aa1aa5a959604314fa3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731710175400 2024-11-15T22:36:58,806 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] compactions.Compactor(225): Compacting d73b6dbb9d2b4c7bad06f99b1c94293c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731710190473 2024-11-15T22:36:58,834 INFO [RS:0;e611192d6313:38539-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3be2cba3fb7ac07a66be1d5545b3e83d#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T22:36:58,835 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/c7398d2fbf1b4db08426544ab04d818b is 1080, key is row0001/info:/1731710161341/Put/seqid=0 2024-11-15T22:36:58,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741847_1023 (size=27710) 2024-11-15T22:36:58,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741847_1023 (size=27710) 2024-11-15T22:36:58,852 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/c7398d2fbf1b4db08426544ab04d818b as hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/c7398d2fbf1b4db08426544ab04d818b 2024-11-15T22:36:58,867 INFO [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3be2cba3fb7ac07a66be1d5545b3e83d/info of 3be2cba3fb7ac07a66be1d5545b3e83d into c7398d2fbf1b4db08426544ab04d818b(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T22:36:58,867 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3be2cba3fb7ac07a66be1d5545b3e83d: 2024-11-15T22:36:58,869 INFO [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d., storeName=3be2cba3fb7ac07a66be1d5545b3e83d/info, priority=13, startTime=1731710218798; duration=0sec 2024-11-15T22:36:58,869 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-15T22:36:58,869 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:36:58,869 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/c7398d2fbf1b4db08426544ab04d818b because midkey is the same as first or last row 2024-11-15T22:36:58,869 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-15T22:36:58,869 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:36:58,869 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/c7398d2fbf1b4db08426544ab04d818b because midkey is the same as first or last row 2024-11-15T22:36:58,870 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-15T22:36:58,870 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:36:58,870 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/c7398d2fbf1b4db08426544ab04d818b because midkey is the same as first or last row 2024-11-15T22:36:58,870 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:36:58,870 DEBUG [RS:0;e611192d6313:38539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3be2cba3fb7ac07a66be1d5545b3e83d:info 2024-11-15T22:37:10,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38539 {}] regionserver.HRegion(8855): Flush requested on 3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:37:10,832 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3be2cba3fb7ac07a66be1d5545b3e83d 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T22:37:10,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/483be9c881f949bab1911643189da67d is 1080, key is row0022/info:/1731710218790/Put/seqid=0 2024-11-15T22:37:10,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741848_1024 (size=12509) 2024-11-15T22:37:10,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741848_1024 (size=12509) 2024-11-15T22:37:10,849 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/483be9c881f949bab1911643189da67d 2024-11-15T22:37:10,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/483be9c881f949bab1911643189da67d as hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/483be9c881f949bab1911643189da67d 2024-11-15T22:37:10,867 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/483be9c881f949bab1911643189da67d, entries=7, sequenceid=42, filesize=12.2 K 2024-11-15T22:37:10,869 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 3be2cba3fb7ac07a66be1d5545b3e83d in 37ms, sequenceid=42, compaction requested=false 2024-11-15T22:37:10,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3be2cba3fb7ac07a66be1d5545b3e83d: 2024-11-15T22:37:10,869 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-15T22:37:10,869 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:37:10,869 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/c7398d2fbf1b4db08426544ab04d818b because midkey is the same as first or last row 2024-11-15T22:37:17,191 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T22:37:18,855 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T22:37:18,856 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T22:37:18,857 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:37:18,866 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:37:18,867 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:37:18,867 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T22:37:18,867 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T22:37:18,867 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=245640440, stopped=false 2024-11-15T22:37:18,868 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e611192d6313,34701,1731710148064 2024-11-15T22:37:18,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T22:37:18,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T22:37:18,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:18,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:18,945 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T22:37:18,945 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T22:37:18,946 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:37:18,946 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:37:18,946 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:37:18,946 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:37:18,947 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e611192d6313,38539,1731710148816' ***** 2024-11-15T22:37:18,947 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T22:37:18,948 INFO [RS:0;e611192d6313:38539 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T22:37:18,948 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T22:37:18,948 INFO [RS:0;e611192d6313:38539 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T22:37:18,949 INFO [RS:0;e611192d6313:38539 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T22:37:18,949 INFO [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer(3091): Received CLOSE for 3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:37:18,950 INFO [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer(959): stopping server e611192d6313,38539,1731710148816 2024-11-15T22:37:18,950 INFO [RS:0;e611192d6313:38539 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T22:37:18,950 INFO [RS:0;e611192d6313:38539 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e611192d6313:38539. 
2024-11-15T22:37:18,951 DEBUG [RS:0;e611192d6313:38539 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:37:18,951 DEBUG [RS:0;e611192d6313:38539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:37:18,951 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3be2cba3fb7ac07a66be1d5545b3e83d, disabling compactions & flushes 2024-11-15T22:37:18,951 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. 2024-11-15T22:37:18,951 INFO [RS:0;e611192d6313:38539 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T22:37:18,951 INFO [RS:0;e611192d6313:38539 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T22:37:18,951 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. 2024-11-15T22:37:18,951 INFO [RS:0;e611192d6313:38539 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T22:37:18,951 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. after waiting 0 ms 2024-11-15T22:37:18,951 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. 
2024-11-15T22:37:18,952 INFO [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T22:37:18,952 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 3be2cba3fb7ac07a66be1d5545b3e83d 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-15T22:37:18,952 INFO [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T22:37:18,952 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T22:37:18,952 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T22:37:18,952 DEBUG [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 3be2cba3fb7ac07a66be1d5545b3e83d=TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d.} 2024-11-15T22:37:18,952 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T22:37:18,952 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T22:37:18,952 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T22:37:18,953 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-15T22:37:18,953 DEBUG [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 3be2cba3fb7ac07a66be1d5545b3e83d 2024-11-15T22:37:18,959 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/633f24baf54e4495a679a36fcea23b2c is 1080, key is row0029/info:/1731710232836/Put/seqid=0 2024-11-15T22:37:18,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741849_1025 (size=8193) 2024-11-15T22:37:18,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741849_1025 (size=8193) 2024-11-15T22:37:18,969 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/633f24baf54e4495a679a36fcea23b2c 2024-11-15T22:37:18,974 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/.tmp/info/fb47074d1a4d4f46a16b7495603de50b is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d./info:regioninfo/1731710151710/Put/seqid=0 2024-11-15T22:37:18,978 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/.tmp/info/633f24baf54e4495a679a36fcea23b2c as hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/633f24baf54e4495a679a36fcea23b2c 2024-11-15T22:37:18,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741850_1026 (size=7016) 2024-11-15T22:37:18,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741850_1026 (size=7016) 2024-11-15T22:37:18,982 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/.tmp/info/fb47074d1a4d4f46a16b7495603de50b 2024-11-15T22:37:18,987 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/633f24baf54e4495a679a36fcea23b2c, entries=3, sequenceid=48, filesize=8.0 K 2024-11-15T22:37:18,988 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 3be2cba3fb7ac07a66be1d5545b3e83d in 36ms, sequenceid=48, compaction requested=true 2024-11-15T22:37:18,989 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/eb2ab5ea96f042cbb1fe1399c899ab0c, hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/35c8c53a2d0c4aa1aa5a959604314fa3, hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/d73b6dbb9d2b4c7bad06f99b1c94293c] to archive 2024-11-15T22:37:18,992 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-15T22:37:18,995 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/eb2ab5ea96f042cbb1fe1399c899ab0c to hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/archive/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/eb2ab5ea96f042cbb1fe1399c899ab0c 2024-11-15T22:37:18,998 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/35c8c53a2d0c4aa1aa5a959604314fa3 to hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/archive/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/35c8c53a2d0c4aa1aa5a959604314fa3 2024-11-15T22:37:19,000 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/d73b6dbb9d2b4c7bad06f99b1c94293c to hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/archive/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/info/d73b6dbb9d2b4c7bad06f99b1c94293c 2024-11-15T22:37:19,005 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/.tmp/ns/3adfed41196f46fda7ab340faf51efe2 is 43, key is default/ns:d/1731710150987/Put/seqid=0 2024-11-15T22:37:19,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741851_1027 (size=5153) 2024-11-15T22:37:19,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741851_1027 (size=5153) 2024-11-15T22:37:19,012 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/.tmp/ns/3adfed41196f46fda7ab340faf51efe2 2024-11-15T22:37:19,012 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e611192d6313:34701 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-15T22:37:19,017 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [eb2ab5ea96f042cbb1fe1399c899ab0c=12509, 35c8c53a2d0c4aa1aa5a959604314fa3=12509, d73b6dbb9d2b4c7bad06f99b1c94293c=12509] 2024-11-15T22:37:19,023 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/default/TestLogRolling-testSlowSyncLogRolling/3be2cba3fb7ac07a66be1d5545b3e83d/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-15T22:37:19,026 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. 2024-11-15T22:37:19,026 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3be2cba3fb7ac07a66be1d5545b3e83d: Waiting for close lock at 1731710238950Running coprocessor pre-close hooks at 1731710238951 (+1 ms)Disabling compacts and flushes for region at 1731710238951Disabling writes for close at 1731710238951Obtaining lock to block concurrent updates at 1731710238952 (+1 ms)Preparing flush snapshotting stores in 3be2cba3fb7ac07a66be1d5545b3e83d at 1731710238952Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731710238952Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. at 1731710238954 (+2 ms)Flushing 3be2cba3fb7ac07a66be1d5545b3e83d/info: creating writer at 1731710238954Flushing 3be2cba3fb7ac07a66be1d5545b3e83d/info: appending metadata at 1731710238959 (+5 ms)Flushing 3be2cba3fb7ac07a66be1d5545b3e83d/info: closing flushed file at 1731710238959Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75685d81: reopening flushed file at 1731710238976 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 3be2cba3fb7ac07a66be1d5545b3e83d in 36ms, sequenceid=48, compaction requested=true at 1731710238988 (+12 ms)Writing region close event to WAL at 1731710239018 (+30 ms)Running coprocessor post-close hooks at 1731710239024 (+6 ms)Closed at 1731710239026 (+2 ms) 2024-11-15T22:37:19,027 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731710151242.3be2cba3fb7ac07a66be1d5545b3e83d. 
2024-11-15T22:37:19,036 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/.tmp/table/6601d8e1a1074bc09c82e57a9ee8bde5 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731710151727/Put/seqid=0 2024-11-15T22:37:19,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741852_1028 (size=5396) 2024-11-15T22:37:19,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741852_1028 (size=5396) 2024-11-15T22:37:19,043 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/.tmp/table/6601d8e1a1074bc09c82e57a9ee8bde5 2024-11-15T22:37:19,052 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/.tmp/info/fb47074d1a4d4f46a16b7495603de50b as hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/info/fb47074d1a4d4f46a16b7495603de50b 2024-11-15T22:37:19,061 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/info/fb47074d1a4d4f46a16b7495603de50b, entries=10, sequenceid=11, filesize=6.9 K 2024-11-15T22:37:19,062 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/.tmp/ns/3adfed41196f46fda7ab340faf51efe2 as hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/ns/3adfed41196f46fda7ab340faf51efe2 2024-11-15T22:37:19,071 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/ns/3adfed41196f46fda7ab340faf51efe2, entries=2, sequenceid=11, filesize=5.0 K 2024-11-15T22:37:19,072 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/.tmp/table/6601d8e1a1074bc09c82e57a9ee8bde5 as hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/table/6601d8e1a1074bc09c82e57a9ee8bde5 2024-11-15T22:37:19,080 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/table/6601d8e1a1074bc09c82e57a9ee8bde5, entries=2, sequenceid=11, filesize=5.3 K 2024-11-15T22:37:19,082 INFO 
[RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 130ms, sequenceid=11, compaction requested=false 2024-11-15T22:37:19,088 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-15T22:37:19,088 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T22:37:19,089 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T22:37:19,089 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731710238952Running coprocessor pre-close hooks at 1731710238952Disabling compacts and flushes for region at 1731710238952Disabling writes for close at 1731710238952Obtaining lock to block concurrent updates at 1731710238953 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731710238953Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731710238953Flushing stores of hbase:meta,,1.1588230740 at 1731710238955 (+2 ms)Flushing 1588230740/info: creating writer at 1731710238955Flushing 1588230740/info: appending metadata at 1731710238974 (+19 ms)Flushing 1588230740/info: closing flushed file at 1731710238974Flushing 1588230740/ns: creating writer at 1731710238989 (+15 ms)Flushing 1588230740/ns: appending metadata at 1731710239005 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731710239005Flushing 1588230740/table: creating writer at 1731710239021 (+16 ms)Flushing 1588230740/table: appending metadata at 1731710239036 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731710239036Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5bf4812a: reopening flushed file at 1731710239050 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@753da3f1: reopening flushed file at 1731710239061 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b44a221: reopening flushed file at 1731710239071 (+10 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 130ms, sequenceid=11, compaction requested=false at 1731710239082 (+11 ms)Writing region close event to WAL at 1731710239083 (+1 ms)Running coprocessor post-close hooks at 1731710239088 (+5 ms)Closed at 1731710239089 (+1 ms) 2024-11-15T22:37:19,089 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T22:37:19,111 INFO [regionserver/e611192d6313:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T22:37:19,111 INFO [regionserver/e611192d6313:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T22:37:19,153 INFO [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer(976): stopping server 
e611192d6313,38539,1731710148816; all regions closed. 2024-11-15T22:37:19,155 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:19,155 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:19,155 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:19,155 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:19,155 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:19,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741834_1010 (size=3066) 2024-11-15T22:37:19,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741834_1010 (size=3066) 2024-11-15T22:37:19,161 DEBUG [RS:0;e611192d6313:38539 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/oldWALs 2024-11-15T22:37:19,161 INFO [RS:0;e611192d6313:38539 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e611192d6313%2C38539%2C1731710148816.meta:.meta(num 1731710150732) 2024-11-15T22:37:19,161 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:19,161 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:19,162 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:19,162 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:19,162 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:19,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741846_1022 (size=14205) 2024-11-15T22:37:19,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741846_1022 (size=14205) 2024-11-15T22:37:19,170 DEBUG [RS:0;e611192d6313:38539 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/oldWALs 2024-11-15T22:37:19,170 INFO [RS:0;e611192d6313:38539 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e611192d6313%2C38539%2C1731710148816:(num 1731710218776) 2024-11-15T22:37:19,170 DEBUG [RS:0;e611192d6313:38539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:37:19,170 INFO [RS:0;e611192d6313:38539 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T22:37:19,170 INFO [RS:0;e611192d6313:38539 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T22:37:19,171 INFO [RS:0;e611192d6313:38539 {}] hbase.ChoreService(370): Chore service for: regionserver/e611192d6313:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T22:37:19,171 INFO [RS:0;e611192d6313:38539 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T22:37:19,171 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T22:37:19,171 INFO [RS:0;e611192d6313:38539 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38539 2024-11-15T22:37:19,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e611192d6313,38539,1731710148816 2024-11-15T22:37:19,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T22:37:19,186 INFO [RS:0;e611192d6313:38539 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T22:37:19,197 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e611192d6313,38539,1731710148816] 2024-11-15T22:37:19,207 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e611192d6313,38539,1731710148816 already deleted, retry=false 2024-11-15T22:37:19,207 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e611192d6313,38539,1731710148816 expired; onlineServers=0 2024-11-15T22:37:19,208 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e611192d6313,34701,1731710148064' ***** 2024-11-15T22:37:19,208 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T22:37:19,208 INFO [M:0;e611192d6313:34701 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T22:37:19,208 INFO [M:0;e611192d6313:34701 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T22:37:19,208 DEBUG [M:0;e611192d6313:34701 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T22:37:19,208 DEBUG [M:0;e611192d6313:34701 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T22:37:19,208 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T22:37:19,208 DEBUG [master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710149932 {}] cleaner.HFileCleaner(306): Exit Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710149932,5,FailOnTimeoutGroup] 2024-11-15T22:37:19,208 DEBUG [master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710149929 {}] cleaner.HFileCleaner(306): Exit Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710149929,5,FailOnTimeoutGroup] 2024-11-15T22:37:19,208 INFO [M:0;e611192d6313:34701 {}] hbase.ChoreService(370): Chore service for: master/e611192d6313:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T22:37:19,209 INFO [M:0;e611192d6313:34701 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T22:37:19,209 DEBUG [M:0;e611192d6313:34701 {}] master.HMaster(1795): Stopping service threads 2024-11-15T22:37:19,209 INFO [M:0;e611192d6313:34701 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T22:37:19,209 INFO [M:0;e611192d6313:34701 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T22:37:19,209 INFO [M:0;e611192d6313:34701 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T22:37:19,209 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T22:37:19,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T22:37:19,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:19,218 DEBUG [M:0;e611192d6313:34701 {}] zookeeper.ZKUtil(347): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T22:37:19,218 WARN [M:0;e611192d6313:34701 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T22:37:19,219 INFO [M:0;e611192d6313:34701 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/.lastflushedseqids 2024-11-15T22:37:19,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741853_1029 (size=130) 2024-11-15T22:37:19,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741853_1029 (size=130) 2024-11-15T22:37:19,233 INFO [M:0;e611192d6313:34701 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T22:37:19,233 INFO [M:0;e611192d6313:34701 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T22:37:19,234 DEBUG [M:0;e611192d6313:34701 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T22:37:19,234 INFO [M:0;e611192d6313:34701 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:37:19,234 DEBUG [M:0;e611192d6313:34701 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:37:19,234 DEBUG [M:0;e611192d6313:34701 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T22:37:19,234 DEBUG [M:0;e611192d6313:34701 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:37:19,234 INFO [M:0;e611192d6313:34701 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.01 KB heapSize=29.18 KB 2024-11-15T22:37:19,256 DEBUG [M:0;e611192d6313:34701 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c452d712a984e93a0186cdb01194860 is 82, key is hbase:meta,,1/info:regioninfo/1731710150804/Put/seqid=0 2024-11-15T22:37:19,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741854_1030 (size=5672) 2024-11-15T22:37:19,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741854_1030 (size=5672) 2024-11-15T22:37:19,262 INFO [M:0;e611192d6313:34701 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c452d712a984e93a0186cdb01194860 2024-11-15T22:37:19,285 DEBUG [M:0;e611192d6313:34701 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dfb8b3c0b1584efe9046f058fad2274a is 765, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731710151735/Put/seqid=0 2024-11-15T22:37:19,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741855_1031 (size=6246) 2024-11-15T22:37:19,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741855_1031 (size=6246) 2024-11-15T22:37:19,292 INFO [M:0;e611192d6313:34701 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.41 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dfb8b3c0b1584efe9046f058fad2274a 2024-11-15T22:37:19,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:37:19,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38539-0x10140a3d6530001, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:37:19,297 INFO [RS:0;e611192d6313:38539 {}] hbase.HBaseServerBase(486): Close table descriptors 
2024-11-15T22:37:19,297 INFO [RS:0;e611192d6313:38539 {}] regionserver.HRegionServer(1031): Exiting; stopping=e611192d6313,38539,1731710148816; zookeeper connection closed. 2024-11-15T22:37:19,298 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6d5c689d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6d5c689d 2024-11-15T22:37:19,298 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T22:37:19,299 INFO [M:0;e611192d6313:34701 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for dfb8b3c0b1584efe9046f058fad2274a 2024-11-15T22:37:19,313 DEBUG [M:0;e611192d6313:34701 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d571a7ff62fd43fdaac4ce85de5d8038 is 69, key is e611192d6313,38539,1731710148816/rs:state/1731710149955/Put/seqid=0 2024-11-15T22:37:19,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741856_1032 (size=5156) 2024-11-15T22:37:19,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741856_1032 (size=5156) 2024-11-15T22:37:19,320 INFO [M:0;e611192d6313:34701 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d571a7ff62fd43fdaac4ce85de5d8038 2024-11-15T22:37:19,339 DEBUG [M:0;e611192d6313:34701 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/045ab5d835184f73878a8bd2e1fc8646 is 52, key is load_balancer_on/state:d/1731710151223/Put/seqid=0 2024-11-15T22:37:19,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741857_1033 (size=5056) 2024-11-15T22:37:19,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741857_1033 (size=5056) 2024-11-15T22:37:19,346 INFO [M:0;e611192d6313:34701 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/045ab5d835184f73878a8bd2e1fc8646 2024-11-15T22:37:19,352 DEBUG [M:0;e611192d6313:34701 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c452d712a984e93a0186cdb01194860 as hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7c452d712a984e93a0186cdb01194860 2024-11-15T22:37:19,359 INFO [M:0;e611192d6313:34701 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7c452d712a984e93a0186cdb01194860, entries=8, sequenceid=59, filesize=5.5 K 2024-11-15T22:37:19,360 DEBUG [M:0;e611192d6313:34701 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dfb8b3c0b1584efe9046f058fad2274a as hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/dfb8b3c0b1584efe9046f058fad2274a 2024-11-15T22:37:19,367 INFO [M:0;e611192d6313:34701 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for dfb8b3c0b1584efe9046f058fad2274a 2024-11-15T22:37:19,367 INFO [M:0;e611192d6313:34701 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/dfb8b3c0b1584efe9046f058fad2274a, entries=6, sequenceid=59, filesize=6.1 K 2024-11-15T22:37:19,368 DEBUG [M:0;e611192d6313:34701 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d571a7ff62fd43fdaac4ce85de5d8038 as hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d571a7ff62fd43fdaac4ce85de5d8038 2024-11-15T22:37:19,375 INFO [M:0;e611192d6313:34701 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d571a7ff62fd43fdaac4ce85de5d8038, entries=1, sequenceid=59, filesize=5.0 K 2024-11-15T22:37:19,376 DEBUG [M:0;e611192d6313:34701 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/045ab5d835184f73878a8bd2e1fc8646 as hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/045ab5d835184f73878a8bd2e1fc8646 2024-11-15T22:37:19,382 INFO [M:0;e611192d6313:34701 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/045ab5d835184f73878a8bd2e1fc8646, entries=1, sequenceid=59, filesize=4.9 K 2024-11-15T22:37:19,384 INFO [M:0;e611192d6313:34701 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 150ms, sequenceid=59, compaction requested=false 2024-11-15T22:37:19,385 INFO [M:0;e611192d6313:34701 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T22:37:19,386 DEBUG [M:0;e611192d6313:34701 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731710239233Disabling compacts and flushes for region at 1731710239233Disabling writes for close at 1731710239234 (+1 ms)Obtaining lock to block concurrent updates at 1731710239234Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731710239234Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23564, getHeapSize=29816, getOffHeapSize=0, getCellsCount=70 at 1731710239235 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731710239236 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731710239236Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731710239255 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731710239255Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731710239270 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731710239284 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731710239284Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731710239299 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731710239313 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731710239313Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731710239326 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731710239339 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731710239339Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10ac0799: reopening flushed file at 1731710239351 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bda813: reopening flushed file at 1731710239359 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a40d967: reopening flushed file at 1731710239367 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@dd67d5d: reopening flushed file at 1731710239375 (+8 ms)Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 150ms, sequenceid=59, compaction requested=false at 1731710239384 (+9 ms)Writing region close event to WAL at 1731710239385 (+1 ms)Closed at 1731710239385 2024-11-15T22:37:19,386 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:19,387 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:19,387 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:19,387 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:19,387 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:19,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46469 is added to blk_1073741830_1006 (size=27961) 2024-11-15T22:37:19,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41735 is added to blk_1073741830_1006 (size=27961) 2024-11-15T22:37:19,390 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T22:37:19,390 INFO [M:0;e611192d6313:34701 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T22:37:19,390 INFO [M:0;e611192d6313:34701 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34701 2024-11-15T22:37:19,390 INFO [M:0;e611192d6313:34701 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T22:37:19,497 INFO [M:0;e611192d6313:34701 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T22:37:19,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:37:19,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34701-0x10140a3d6530000, quorum=127.0.0.1:59677, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:37:19,534 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:37:19,537 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:37:19,538 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:37:19,538 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:37:19,538 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/hadoop.log.dir/,STOPPED} 2024-11-15T22:37:19,541 WARN [BP-1037677707-172.17.0.3-1731710143782 heartbeating to localhost/127.0.0.1:39371 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:37:19,541 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T22:37:19,541 WARN [BP-1037677707-172.17.0.3-1731710143782 heartbeating to localhost/127.0.0.1:39371 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1037677707-172.17.0.3-1731710143782 (Datanode Uuid adc56cfe-08ee-46dc-938b-64638fc7a83d) service to localhost/127.0.0.1:39371 2024-11-15T22:37:19,542 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:37:19,543 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/cluster_6e8fd9e5-47e5-4c48-2126-63d8c2abcff2/data/data3/current/BP-1037677707-172.17.0.3-1731710143782 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:37:19,543 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/cluster_6e8fd9e5-47e5-4c48-2126-63d8c2abcff2/data/data4/current/BP-1037677707-172.17.0.3-1731710143782 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:37:19,543 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:37:19,545 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:37:19,546 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:37:19,546 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:37:19,546 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:37:19,546 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/hadoop.log.dir/,STOPPED} 2024-11-15T22:37:19,547 WARN [BP-1037677707-172.17.0.3-1731710143782 heartbeating to localhost/127.0.0.1:39371 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:37:19,547 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T22:37:19,547 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:37:19,547 WARN [BP-1037677707-172.17.0.3-1731710143782 heartbeating to localhost/127.0.0.1:39371 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1037677707-172.17.0.3-1731710143782 (Datanode Uuid cac71d14-5e07-4464-b117-eddbd54b84c8) service to localhost/127.0.0.1:39371 2024-11-15T22:37:19,548 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/cluster_6e8fd9e5-47e5-4c48-2126-63d8c2abcff2/data/data1/current/BP-1037677707-172.17.0.3-1731710143782 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:37:19,548 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/cluster_6e8fd9e5-47e5-4c48-2126-63d8c2abcff2/data/data2/current/BP-1037677707-172.17.0.3-1731710143782 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:37:19,549 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:37:19,557 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T22:37:19,557 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:37:19,557 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:37:19,558 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:37:19,558 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/hadoop.log.dir/,STOPPED} 2024-11-15T22:37:19,566 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T22:37:19,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T22:37:19,604 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=82 (was 12) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39371 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39371 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: 
IPC Client (75657370) connection to localhost/127.0.0.1:39371 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/e611192d6313:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39371 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39371 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@1ab9ef81 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: master/e611192d6313:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: regionserver/e611192d6313:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39371 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging 
thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:39371 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39371 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/e611192d6313:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=70 (was 246), ProcessCount=11 (was 11), AvailableMemoryMB=4656 (was 5305)
2024-11-15T22:37:19,610 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=83, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=70, ProcessCount=11, AvailableMemoryMB=4655
2024-11-15T22:37:19,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-15T22:37:19,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/hadoop.log.dir so I do NOT create it in target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3
2024-11-15T22:37:19,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc4e2b6e-e0b2-facc-689c-8716d9d8f205/hadoop.tmp.dir so I do NOT create it in target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3
2024-11-15T22:37:19,610 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/cluster_29d7c4ac-b328-cc97-7a13-329239f9a74f, deleteOnExit=true
2024-11-15T22:37:19,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-15T22:37:19,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/test.cache.data in system properties and HBase conf
2024-11-15T22:37:19,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/hadoop.tmp.dir in system properties and HBase conf
2024-11-15T22:37:19,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/hadoop.log.dir in system properties and HBase conf
2024-11-15T22:37:19,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-15T22:37:19,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-15T22:37:19,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-15T22:37:19,611 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a
DistributedFileSystem. Skipping on block location reordering 2024-11-15T22:37:19,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T22:37:19,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T22:37:19,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T22:37:19,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T22:37:19,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T22:37:19,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T22:37:19,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T22:37:19,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T22:37:19,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T22:37:19,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/nfs.dump.dir in system properties and HBase conf 2024-11-15T22:37:19,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/java.io.tmpdir in system properties and HBase conf 2024-11-15T22:37:19,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T22:37:19,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T22:37:19,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T22:37:19,625 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T22:37:20,027 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:37:20,034 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:37:20,040 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:37:20,040 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:37:20,040 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T22:37:20,041 INFO [regionserver/e611192d6313:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T22:37:20,041 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:37:20,042 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f841e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:37:20,042 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30a1c2a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:37:20,139 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d95bc23{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/java.io.tmpdir/jetty-localhost-38585-hadoop-hdfs-3_4_1-tests_jar-_-any-1027202063988211979/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T22:37:20,140 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78fa6004{HTTP/1.1, (http/1.1)}{localhost:38585} 2024-11-15T22:37:20,140 INFO [Time-limited test {}] server.Server(415): Started @98298ms 2024-11-15T22:37:20,151 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T22:37:20,398 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:37:20,402 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:37:20,403 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:37:20,403 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:37:20,403 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T22:37:20,404 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e23c0c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:37:20,404 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7517d9e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:37:20,498 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d69c419{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/java.io.tmpdir/jetty-localhost-46021-hadoop-hdfs-3_4_1-tests_jar-_-any-4466626231722434803/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:37:20,499 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2f2378c9{HTTP/1.1, (http/1.1)}{localhost:46021} 2024-11-15T22:37:20,499 INFO [Time-limited test {}] server.Server(415): Started @98657ms 2024-11-15T22:37:20,500 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:37:20,538 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:37:20,542 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:37:20,542 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:37:20,542 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:37:20,542 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T22:37:20,543 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ce533a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:37:20,543 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@aab268d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:37:20,639 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75434f63{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/java.io.tmpdir/jetty-localhost-37819-hadoop-hdfs-3_4_1-tests_jar-_-any-7908418157349645450/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:37:20,640 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a2066f8{HTTP/1.1, (http/1.1)}{localhost:37819} 2024-11-15T22:37:20,640 INFO [Time-limited test {}] server.Server(415): Started @98798ms 2024-11-15T22:37:20,641 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:37:21,708 WARN [Thread-444 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/cluster_29d7c4ac-b328-cc97-7a13-329239f9a74f/data/data1/current/BP-1214065719-172.17.0.3-1731710239637/current, will proceed with Du for space computation calculation, 2024-11-15T22:37:21,708 WARN [Thread-445 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/cluster_29d7c4ac-b328-cc97-7a13-329239f9a74f/data/data2/current/BP-1214065719-172.17.0.3-1731710239637/current, will proceed with Du for space computation calculation, 2024-11-15T22:37:21,726 WARN [Thread-408 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T22:37:21,729 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc3b19ff3c17db244 with lease ID 0x482115552d4f301: Processing first storage report for DS-8bb4d1ff-1b55-4b67-95df-5cbcf97f81dd from datanode DatanodeRegistration(127.0.0.1:44761, datanodeUuid=535a349a-a24e-4737-9b67-6a39f7cfb824, infoPort=36397, infoSecurePort=0, ipcPort=34753, storageInfo=lv=-57;cid=testClusterID;nsid=899425242;c=1731710239637) 2024-11-15T22:37:21,729 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc3b19ff3c17db244 with lease ID 0x482115552d4f301: from storage DS-8bb4d1ff-1b55-4b67-95df-5cbcf97f81dd node DatanodeRegistration(127.0.0.1:44761, datanodeUuid=535a349a-a24e-4737-9b67-6a39f7cfb824, infoPort=36397, infoSecurePort=0, ipcPort=34753, storageInfo=lv=-57;cid=testClusterID;nsid=899425242;c=1731710239637), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:37:21,729 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc3b19ff3c17db244 with lease ID 0x482115552d4f301: Processing first storage report for DS-527153d4-d30e-46ff-bedf-63ca7a0d6f5c from datanode DatanodeRegistration(127.0.0.1:44761, datanodeUuid=535a349a-a24e-4737-9b67-6a39f7cfb824, infoPort=36397, infoSecurePort=0, ipcPort=34753, storageInfo=lv=-57;cid=testClusterID;nsid=899425242;c=1731710239637) 2024-11-15T22:37:21,729 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc3b19ff3c17db244 with lease ID 0x482115552d4f301: from storage DS-527153d4-d30e-46ff-bedf-63ca7a0d6f5c node DatanodeRegistration(127.0.0.1:44761, datanodeUuid=535a349a-a24e-4737-9b67-6a39f7cfb824, infoPort=36397, infoSecurePort=0, ipcPort=34753, storageInfo=lv=-57;cid=testClusterID;nsid=899425242;c=1731710239637), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:37:21,829 WARN [Thread-455 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/cluster_29d7c4ac-b328-cc97-7a13-329239f9a74f/data/data3/current/BP-1214065719-172.17.0.3-1731710239637/current, will proceed with Du for space computation calculation, 2024-11-15T22:37:21,829 WARN [Thread-456 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/cluster_29d7c4ac-b328-cc97-7a13-329239f9a74f/data/data4/current/BP-1214065719-172.17.0.3-1731710239637/current, will proceed with Du for space computation calculation, 2024-11-15T22:37:21,848 WARN [Thread-431 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T22:37:21,851 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdd8230c32984257c with lease ID 0x482115552d4f302: Processing first storage report for DS-3f07d822-0389-4084-9cec-bbacfaf356d3 from datanode DatanodeRegistration(127.0.0.1:39373, datanodeUuid=f94ac678-8fe6-4cb2-9d31-d9d6f0340ad1, infoPort=34523, infoSecurePort=0, ipcPort=41369, storageInfo=lv=-57;cid=testClusterID;nsid=899425242;c=1731710239637) 2024-11-15T22:37:21,851 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd8230c32984257c with lease ID 0x482115552d4f302: from storage DS-3f07d822-0389-4084-9cec-bbacfaf356d3 node DatanodeRegistration(127.0.0.1:39373, datanodeUuid=f94ac678-8fe6-4cb2-9d31-d9d6f0340ad1, infoPort=34523, infoSecurePort=0, ipcPort=41369, storageInfo=lv=-57;cid=testClusterID;nsid=899425242;c=1731710239637), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:37:21,851 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdd8230c32984257c with lease ID 0x482115552d4f302: Processing first storage report for DS-d36af08e-168c-44a9-90b6-19a4c7155e34 from datanode DatanodeRegistration(127.0.0.1:39373, datanodeUuid=f94ac678-8fe6-4cb2-9d31-d9d6f0340ad1, infoPort=34523, infoSecurePort=0, ipcPort=41369, storageInfo=lv=-57;cid=testClusterID;nsid=899425242;c=1731710239637) 2024-11-15T22:37:21,851 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd8230c32984257c with lease ID 0x482115552d4f302: from storage DS-d36af08e-168c-44a9-90b6-19a4c7155e34 node DatanodeRegistration(127.0.0.1:39373, datanodeUuid=f94ac678-8fe6-4cb2-9d31-d9d6f0340ad1, infoPort=34523, infoSecurePort=0, ipcPort=41369, storageInfo=lv=-57;cid=testClusterID;nsid=899425242;c=1731710239637), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:37:21,891 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3 2024-11-15T22:37:21,895 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/cluster_29d7c4ac-b328-cc97-7a13-329239f9a74f/zookeeper_0, clientPort=62160, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/cluster_29d7c4ac-b328-cc97-7a13-329239f9a74f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/cluster_29d7c4ac-b328-cc97-7a13-329239f9a74f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T22:37:21,896 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62160 2024-11-15T22:37:21,896 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:21,897 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:21,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741825_1001 (size=7) 2024-11-15T22:37:21,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44761 is added to blk_1073741825_1001 (size=7) 2024-11-15T22:37:21,909 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52 with version=8 2024-11-15T22:37:21,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/hbase-staging 2024-11-15T22:37:21,911 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e611192d6313:0 server-side Connection retries=45 2024-11-15T22:37:21,911 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:37:21,911 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T22:37:21,912 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T22:37:21,912 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:37:21,912 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T22:37:21,912 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T22:37:21,912 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T22:37:21,913 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34863 2024-11-15T22:37:21,914 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34863 connecting to ZooKeeper ensemble=127.0.0.1:62160 2024-11-15T22:37:21,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:348630x0, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T22:37:21,966 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34863-0x10140a548120000 connected 2024-11-15T22:37:22,050 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:22,054 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:22,060 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:37:22,061 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52, hbase.cluster.distributed=false 2024-11-15T22:37:22,064 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T22:37:22,065 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34863 2024-11-15T22:37:22,065 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34863 2024-11-15T22:37:22,065 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34863 2024-11-15T22:37:22,065 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34863 2024-11-15T22:37:22,066 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34863 2024-11-15T22:37:22,081 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e611192d6313:0 server-side Connection retries=45 2024-11-15T22:37:22,081 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:37:22,081 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T22:37:22,081 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T22:37:22,081 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:37:22,081 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T22:37:22,081 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T22:37:22,082 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T22:37:22,082 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43179 2024-11-15T22:37:22,084 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43179 connecting to ZooKeeper ensemble=127.0.0.1:62160 2024-11-15T22:37:22,085 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:22,086 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:22,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:431790x0, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T22:37:22,102 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:37:22,102 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43179-0x10140a548120001 connected 2024-11-15T22:37:22,103 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T22:37:22,104 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T22:37:22,105 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T22:37:22,106 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T22:37:22,107 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43179 2024-11-15T22:37:22,107 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43179 2024-11-15T22:37:22,107 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43179 2024-11-15T22:37:22,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43179 2024-11-15T22:37:22,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43179 2024-11-15T22:37:22,122 DEBUG [M:0;e611192d6313:34863 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e611192d6313:34863 2024-11-15T22:37:22,123 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e611192d6313,34863,1731710241911 2024-11-15T22:37:22,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:37:22,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:37:22,134 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e611192d6313,34863,1731710241911 2024-11-15T22:37:22,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T22:37:22,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:22,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:22,144 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T22:37:22,145 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e611192d6313,34863,1731710241911 from backup master directory 2024-11-15T22:37:22,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e611192d6313,34863,1731710241911 2024-11-15T22:37:22,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:37:22,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:37:22,154 WARN [master/e611192d6313:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-15T22:37:22,154 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e611192d6313,34863,1731710241911 2024-11-15T22:37:22,160 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/hbase.id] with ID: cc72a11d-b5ca-4281-9deb-a111a03aae5d 2024-11-15T22:37:22,161 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/.tmp/hbase.id 2024-11-15T22:37:22,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741826_1002 (size=42) 2024-11-15T22:37:22,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44761 is added to blk_1073741826_1002 (size=42) 2024-11-15T22:37:22,170 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/.tmp/hbase.id]:[hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/hbase.id] 2024-11-15T22:37:22,184 INFO [master/e611192d6313:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:22,184 INFO [master/e611192d6313:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T22:37:22,186 INFO [master/e611192d6313:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-15T22:37:22,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:22,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:22,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44761 is added to blk_1073741827_1003 (size=196) 2024-11-15T22:37:22,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741827_1003 (size=196) 2024-11-15T22:37:22,205 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T22:37:22,206 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T22:37:22,206 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:37:22,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44761 is added to blk_1073741828_1004 (size=1189) 2024-11-15T22:37:22,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741828_1004 (size=1189) 2024-11-15T22:37:22,220 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store 2024-11-15T22:37:22,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44761 is added to blk_1073741829_1005 (size=34) 2024-11-15T22:37:22,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741829_1005 (size=34) 2024-11-15T22:37:22,229 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:37:22,229 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T22:37:22,229 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:37:22,229 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:37:22,229 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T22:37:22,229 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:37:22,230 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T22:37:22,230 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731710242229Disabling compacts and flushes for region at 1731710242229Disabling writes for close at 1731710242229Writing region close event to WAL at 1731710242230 (+1 ms)Closed at 1731710242230 2024-11-15T22:37:22,231 WARN [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/.initializing 2024-11-15T22:37:22,231 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/WALs/e611192d6313,34863,1731710241911 2024-11-15T22:37:22,235 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C34863%2C1731710241911, suffix=, logDir=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/WALs/e611192d6313,34863,1731710241911, archiveDir=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/oldWALs, maxLogs=10 2024-11-15T22:37:22,236 INFO [master/e611192d6313:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C34863%2C1731710241911.1731710242236 2024-11-15T22:37:22,241 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/WALs/e611192d6313,34863,1731710241911/e611192d6313%2C34863%2C1731710241911.1731710242236 2024-11-15T22:37:22,242 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34523:34523),(127.0.0.1/127.0.0.1:36397:36397)] 2024-11-15T22:37:22,243 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:37:22,243 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:37:22,243 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:22,243 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:22,247 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:22,249 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T22:37:22,249 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:22,250 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:37:22,250 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:22,252 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T22:37:22,252 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:22,253 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:37:22,253 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:22,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T22:37:22,256 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:22,257 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:37:22,257 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:22,258 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T22:37:22,258 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:22,259 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:37:22,259 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:22,260 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:22,261 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:22,263 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:22,263 DEBUG [master/e611192d6313:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:22,264 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T22:37:22,265 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:22,269 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:37:22,270 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722697, jitterRate=-0.08104388415813446}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T22:37:22,271 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731710242243Initializing all the Stores at 1731710242245 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710242245Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710242247 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710242247Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710242247Cleaning up temporary data from old regions at 1731710242263 (+16 ms)Region opened successfully at 1731710242271 (+8 ms) 2024-11-15T22:37:22,272 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T22:37:22,278 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31e0c5b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e611192d6313/172.17.0.3:0 2024-11-15T22:37:22,279 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T22:37:22,279 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T22:37:22,279 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T22:37:22,280 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T22:37:22,281 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T22:37:22,281 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T22:37:22,281 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T22:37:22,284 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T22:37:22,285 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T22:37:22,291 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T22:37:22,291 INFO [master/e611192d6313:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T22:37:22,292 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T22:37:22,301 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T22:37:22,302 INFO [master/e611192d6313:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T22:37:22,303 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T22:37:22,312 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T22:37:22,313 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T22:37:22,323 DEBUG 
[master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T22:37:22,325 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T22:37:22,333 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T22:37:22,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T22:37:22,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T22:37:22,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:22,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:22,345 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e611192d6313,34863,1731710241911, sessionid=0x10140a548120000, setting cluster-up flag (Was=false) 2024-11-15T22:37:22,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:22,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:22,396 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T22:37:22,400 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e611192d6313,34863,1731710241911 2024-11-15T22:37:22,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:22,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:22,449 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T22:37:22,453 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e611192d6313,34863,1731710241911 2024-11-15T22:37:22,456 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T22:37:22,459 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T22:37:22,459 INFO [master/e611192d6313:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T22:37:22,459 INFO [master/e611192d6313:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T22:37:22,459 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e611192d6313,34863,1731710241911 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T22:37:22,462 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:37:22,462 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:37:22,462 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:37:22,462 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:37:22,462 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e611192d6313:0, corePoolSize=10, maxPoolSize=10 2024-11-15T22:37:22,462 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:22,462 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e611192d6313:0, corePoolSize=2, maxPoolSize=2 2024-11-15T22:37:22,462 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e611192d6313:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T22:37:22,463 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731710272463 2024-11-15T22:37:22,464 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T22:37:22,464 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T22:37:22,464 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T22:37:22,464 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T22:37:22,464 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T22:37:22,464 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T22:37:22,464 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:22,465 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:37:22,465 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T22:37:22,465 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T22:37:22,465 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T22:37:22,465 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T22:37:22,465 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T22:37:22,465 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T22:37:22,466 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710242466,5,FailOnTimeoutGroup] 2024-11-15T22:37:22,466 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710242466,5,FailOnTimeoutGroup] 2024-11-15T22:37:22,466 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:22,466 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T22:37:22,466 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:22,466 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:22,467 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:22,467 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T22:37:22,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44761 is added to blk_1073741831_1007 (size=1321) 2024-11-15T22:37:22,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741831_1007 (size=1321) 2024-11-15T22:37:22,477 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T22:37:22,478 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52 2024-11-15T22:37:22,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44761 is added to blk_1073741832_1008 (size=32) 2024-11-15T22:37:22,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741832_1008 (size=32) 2024-11-15T22:37:22,489 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:37:22,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T22:37:22,495 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T22:37:22,495 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:22,496 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:37:22,496 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T22:37:22,497 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T22:37:22,497 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:22,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:37:22,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T22:37:22,500 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T22:37:22,500 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:22,500 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:37:22,500 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T22:37:22,502 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T22:37:22,502 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:22,503 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:37:22,503 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T22:37:22,504 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/data/hbase/meta/1588230740 2024-11-15T22:37:22,504 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/data/hbase/meta/1588230740 2024-11-15T22:37:22,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T22:37:22,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T22:37:22,506 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
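[Editor's note] The descriptor dumps above (the hbase:meta table and its info/ns/rep_barrier/table families) list the attributes a column family carries: versions, bloom filter type, data block encoding, block size, in-memory flag. As a hedged sketch only, the public client API below builds a descriptor with the same per-family settings as the 'info' family; the table name "demo" is a placeholder and the cluster configuration is assumed to be on the classpath.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateDemoTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Column family tuned like the 'info' family in the dump above:
          // 3 versions, in-memory, ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks.
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("demo"))            // hypothetical table name
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(3)
                  .setInMemory(true)
                  .setBloomFilterType(BloomType.ROWCOL)
                  .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                  .setBlocksize(8192)
                  .build())
              .build();
          admin.createTable(td);
        }
      }
    }

For hbase:meta these attributes are generated internally by FSTableDescriptors, as the log shows; the sketch is only the client-side equivalent for a user table.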
2024-11-15T22:37:22,507 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T22:37:22,510 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:37:22,510 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824241, jitterRate=0.04807765781879425}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T22:37:22,511 INFO [RS:0;e611192d6313:43179 {}] regionserver.HRegionServer(746): ClusterId : cc72a11d-b5ca-4281-9deb-a111a03aae5d 2024-11-15T22:37:22,511 DEBUG [RS:0;e611192d6313:43179 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T22:37:22,512 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731710242489Initializing all the Stores at 1731710242491 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710242491Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710242491Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710242491Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710242491Cleaning up temporary data from old regions at 1731710242505 (+14 ms)Region opened successfully at 1731710242512 (+7 ms) 2024-11-15T22:37:22,512 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T22:37:22,512 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T22:37:22,512 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T22:37:22,512 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T22:37:22,512 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T22:37:22,512 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-15T22:37:22,512 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731710242512Disabling compacts and flushes for region at 1731710242512Disabling writes for close at 1731710242512Writing region close event to WAL at 1731710242512Closed at 1731710242512 2024-11-15T22:37:22,514 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:37:22,514 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T22:37:22,514 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T22:37:22,516 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T22:37:22,517 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T22:37:22,523 DEBUG [RS:0;e611192d6313:43179 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T22:37:22,523 DEBUG [RS:0;e611192d6313:43179 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T22:37:22,534 DEBUG [RS:0;e611192d6313:43179 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T22:37:22,535 DEBUG [RS:0;e611192d6313:43179 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cdc4eea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e611192d6313/172.17.0.3:0 2024-11-15T22:37:22,545 DEBUG [RS:0;e611192d6313:43179 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e611192d6313:43179 2024-11-15T22:37:22,546 INFO [RS:0;e611192d6313:43179 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T22:37:22,546 INFO [RS:0;e611192d6313:43179 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T22:37:22,546 DEBUG [RS:0;e611192d6313:43179 {}] regionserver.HRegionServer(832): About to register with Master. 
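[Editor's note] The "Opened 1588230740 ... SteppingSplitPolicy{...ConstantSizeRegionSplitPolicy{desiredMaxFileSize=...}}" line above shows the split-policy chain and the per-region size ceiling chosen when a region opens. A hedged sketch of overriding these per table with the public builder API follows; the table name is hypothetical, and to the best of my knowledge the site-wide equivalents are hbase.regionserver.region.split.policy and hbase.hregion.max.filesize (verify against your release's hbase-default.xml).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SplitPolicyExample {
      // Per-table override of the split behaviour seen in the
      // "Opened 1588230740 ... SteppingSplitPolicy" journal line above.
      static TableDescriptor withSteppingSplits(String table) {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf(table))
            .setRegionSplitPolicyClassName(
                "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy")
            .setMaxFileSize(10L * 1024 * 1024 * 1024)  // 10 GB ceiling before splits are considered
            .build();
      }
    }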
2024-11-15T22:37:22,546 INFO [RS:0;e611192d6313:43179 {}] regionserver.HRegionServer(2659): reportForDuty to master=e611192d6313,34863,1731710241911 with port=43179, startcode=1731710242081 2024-11-15T22:37:22,547 DEBUG [RS:0;e611192d6313:43179 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T22:37:22,549 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51625, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T22:37:22,550 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34863 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e611192d6313,43179,1731710242081 2024-11-15T22:37:22,550 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34863 {}] master.ServerManager(517): Registering regionserver=e611192d6313,43179,1731710242081 2024-11-15T22:37:22,552 DEBUG [RS:0;e611192d6313:43179 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52 2024-11-15T22:37:22,552 DEBUG [RS:0;e611192d6313:43179 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36437 2024-11-15T22:37:22,552 DEBUG [RS:0;e611192d6313:43179 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T22:37:22,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T22:37:22,624 DEBUG [RS:0;e611192d6313:43179 {}] zookeeper.ZKUtil(111): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e611192d6313,43179,1731710242081 2024-11-15T22:37:22,625 WARN [RS:0;e611192d6313:43179 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T22:37:22,625 INFO [RS:0;e611192d6313:43179 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:37:22,626 DEBUG [RS:0;e611192d6313:43179 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/WALs/e611192d6313,43179,1731710242081 2024-11-15T22:37:22,626 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e611192d6313,43179,1731710242081] 2024-11-15T22:37:22,632 INFO [RS:0;e611192d6313:43179 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T22:37:22,635 INFO [RS:0;e611192d6313:43179 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T22:37:22,635 INFO [RS:0;e611192d6313:43179 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T22:37:22,635 INFO [RS:0;e611192d6313:43179 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
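[Editor's note] The MemStoreFlusher line above (globalMemStoreLimit=880 M, low mark 836 M) and the PressureAwareCompactionThroughputController line (100 MB/s upper, 50 MB/s lower bound) are both derived from configuration rather than hard-coded. The sketch below sets the keys I believe drive them; the key names are quoted from memory and should be treated as assumptions to check against hbase-default.xml for your release.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushAndCompactionTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the regionserver heap usable by all memstores; the 880 M / 836 M
        // pair above is computed from these two settings and the configured heap size.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Throughput window used by PressureAwareCompactionThroughputController (bytes/sec).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        System.out.println("effective upper bound = "
            + conf.getLong("hbase.hstore.compaction.throughput.higher.bound", -1));
      }
    }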
2024-11-15T22:37:22,636 INFO [RS:0;e611192d6313:43179 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T22:37:22,637 INFO [RS:0;e611192d6313:43179 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T22:37:22,637 INFO [RS:0;e611192d6313:43179 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:22,637 DEBUG [RS:0;e611192d6313:43179 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:22,637 DEBUG [RS:0;e611192d6313:43179 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:22,637 DEBUG [RS:0;e611192d6313:43179 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:22,637 DEBUG [RS:0;e611192d6313:43179 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:22,638 DEBUG [RS:0;e611192d6313:43179 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:22,638 DEBUG [RS:0;e611192d6313:43179 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e611192d6313:0, corePoolSize=2, maxPoolSize=2 2024-11-15T22:37:22,638 DEBUG [RS:0;e611192d6313:43179 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:22,638 DEBUG [RS:0;e611192d6313:43179 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:22,638 DEBUG [RS:0;e611192d6313:43179 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:22,638 DEBUG [RS:0;e611192d6313:43179 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:22,638 DEBUG [RS:0;e611192d6313:43179 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:22,638 DEBUG [RS:0;e611192d6313:43179 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:22,638 DEBUG [RS:0;e611192d6313:43179 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e611192d6313:0, corePoolSize=3, maxPoolSize=3 2024-11-15T22:37:22,639 DEBUG [RS:0;e611192d6313:43179 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0, corePoolSize=3, maxPoolSize=3 2024-11-15T22:37:22,639 INFO [RS:0;e611192d6313:43179 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
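[Editor's note] The many "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" lines come from ChoreService scheduling periodic tasks (CompactionChecker, MemstoreFlusherChore, and so on). ScheduledChore and ChoreService are internal, audience-private classes, so the following is only an illustrative sketch of the mechanism, not a supported extension point; names such as "HelloChore" are made up.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Minimal Stoppable so the chore has a lifecycle owner.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");   // thread-name prefix, hypothetical
        service.scheduleChore(new ScheduledChore("HelloChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");              // runs once per period, like CompactionChecker
          }
        });
        Thread.sleep(3_000);
        stopper.stop("done");
        service.shutdown();
      }
    }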
2024-11-15T22:37:22,639 INFO [RS:0;e611192d6313:43179 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:22,639 INFO [RS:0;e611192d6313:43179 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:22,639 INFO [RS:0;e611192d6313:43179 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:22,639 INFO [RS:0;e611192d6313:43179 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:22,639 INFO [RS:0;e611192d6313:43179 {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,43179,1731710242081-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T22:37:22,657 INFO [RS:0;e611192d6313:43179 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T22:37:22,657 INFO [RS:0;e611192d6313:43179 {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,43179,1731710242081-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:22,657 INFO [RS:0;e611192d6313:43179 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:22,657 INFO [RS:0;e611192d6313:43179 {}] regionserver.Replication(171): e611192d6313,43179,1731710242081 started 2024-11-15T22:37:22,668 WARN [e611192d6313:34863 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-15T22:37:22,670 INFO [RS:0;e611192d6313:43179 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
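[Editor's note] The Replication(171) line and the ReplicationSourceStatistics / ReplicationSinkStatistics chores show the replication subsystem starting even though no peer is defined in this test. As a hedged sketch of how a peer is normally added through the Admin API: the peer id "1" and the cluster key "remote-zk:2181:/hbase" are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    public class AddPeerSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Placeholder cluster key for the remote peer cluster.
          ReplicationPeerConfig peer = ReplicationPeerConfig.newBuilder()
              .setClusterKey("remote-zk:2181:/hbase")
              .build();
          admin.addReplicationPeer("1", peer);   // peer id "1" is arbitrary
          System.out.println(admin.listReplicationPeers());
        }
      }
    }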
2024-11-15T22:37:22,670 INFO [RS:0;e611192d6313:43179 {}] regionserver.HRegionServer(1482): Serving as e611192d6313,43179,1731710242081, RpcServer on e611192d6313/172.17.0.3:43179, sessionid=0x10140a548120001 2024-11-15T22:37:22,671 DEBUG [RS:0;e611192d6313:43179 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T22:37:22,671 DEBUG [RS:0;e611192d6313:43179 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e611192d6313,43179,1731710242081 2024-11-15T22:37:22,671 DEBUG [RS:0;e611192d6313:43179 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e611192d6313,43179,1731710242081' 2024-11-15T22:37:22,671 DEBUG [RS:0;e611192d6313:43179 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T22:37:22,671 DEBUG [RS:0;e611192d6313:43179 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T22:37:22,672 DEBUG [RS:0;e611192d6313:43179 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T22:37:22,672 DEBUG [RS:0;e611192d6313:43179 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T22:37:22,672 DEBUG [RS:0;e611192d6313:43179 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e611192d6313,43179,1731710242081 2024-11-15T22:37:22,672 DEBUG [RS:0;e611192d6313:43179 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e611192d6313,43179,1731710242081' 2024-11-15T22:37:22,672 DEBUG [RS:0;e611192d6313:43179 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T22:37:22,673 DEBUG [RS:0;e611192d6313:43179 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T22:37:22,673 DEBUG [RS:0;e611192d6313:43179 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T22:37:22,673 INFO [RS:0;e611192d6313:43179 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T22:37:22,673 INFO [RS:0;e611192d6313:43179 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
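[Editor's note] The flush-table-proc and online-snapshot procedure members started above are the regionserver side of two coordinated operations a client can trigger through Admin. A minimal sketch, assuming a hypothetical table "demo" and snapshot name "demo-snap-1"; quota support is disabled here per the log, and enabling it is a separate site-configuration concern not shown.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("demo");   // hypothetical table
          admin.flush(table);                            // exercised by the flush-table-proc members
          admin.snapshot("demo-snap-1", table);          // exercised by the online-snapshot members
        }
      }
    }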
2024-11-15T22:37:22,780 INFO [RS:0;e611192d6313:43179 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C43179%2C1731710242081, suffix=, logDir=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/WALs/e611192d6313,43179,1731710242081, archiveDir=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/oldWALs, maxLogs=32 2024-11-15T22:37:22,784 INFO [RS:0;e611192d6313:43179 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C43179%2C1731710242081.1731710242784 2024-11-15T22:37:22,790 INFO [RS:0;e611192d6313:43179 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/WALs/e611192d6313,43179,1731710242081/e611192d6313%2C43179%2C1731710242081.1731710242784 2024-11-15T22:37:22,791 DEBUG [RS:0;e611192d6313:43179 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34523:34523),(127.0.0.1/127.0.0.1:36397:36397)] 2024-11-15T22:37:22,918 DEBUG [e611192d6313:34863 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T22:37:22,920 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e611192d6313,43179,1731710242081 2024-11-15T22:37:22,925 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e611192d6313,43179,1731710242081, state=OPENING 2024-11-15T22:37:22,975 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T22:37:22,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:22,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:22,987 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T22:37:22,988 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e611192d6313,43179,1731710242081}] 2024-11-15T22:37:22,988 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:37:22,988 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:37:23,145 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T22:37:23,151 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35135, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T22:37:23,159 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T22:37:23,159 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:37:23,162 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C43179%2C1731710242081.meta, suffix=.meta, logDir=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/WALs/e611192d6313,43179,1731710242081, archiveDir=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/oldWALs, maxLogs=32 2024-11-15T22:37:23,164 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C43179%2C1731710242081.meta.1731710243164.meta 2024-11-15T22:37:23,171 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/WALs/e611192d6313,43179,1731710242081/e611192d6313%2C43179%2C1731710242081.meta.1731710243164.meta 2024-11-15T22:37:23,172 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36397:36397),(127.0.0.1/127.0.0.1:34523:34523)] 2024-11-15T22:37:23,172 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:37:23,173 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T22:37:23,173 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T22:37:23,173 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
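[Editor's note] The CoprocessorHost lines above show MultiRowMutationEndpoint being loaded from the hbase:meta table descriptor ("from HTD") at region open. A hedged sketch of the same attachment mechanism for a user table via the descriptor builder; a real table would also need at least one column family before it could be created.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class AttachCoprocessorSketch {
      // Attach an endpoint coprocessor to a table descriptor, the same mechanism by which
      // MultiRowMutationEndpoint rides along in the hbase:meta descriptor above.
      static TableDescriptor withEndpoint(String table) throws java.io.IOException {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf(table))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }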
2024-11-15T22:37:23,173 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T22:37:23,173 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:37:23,173 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T22:37:23,173 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T22:37:23,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T22:37:23,176 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T22:37:23,176 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:23,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:37:23,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T22:37:23,177 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T22:37:23,178 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:23,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:37:23,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T22:37:23,179 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T22:37:23,179 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:23,180 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:37:23,180 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T22:37:23,181 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T22:37:23,181 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:23,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
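[Editor's note] Each "Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, ... prefetchOnOpen=false" line above reflects per-column-family block cache switches. The sketch below flips a few of them on for a hypothetical 'info' family; it is illustrative only, and site-wide defaults can also influence the effective CacheConfig.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CacheConfigSketch {
      // Per-family switches behind the cacheConfig lines above.
      static ColumnFamilyDescriptor cachedFamily() {
        return ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setBlockCacheEnabled(true)      // cacheDataOnRead
            .setCacheDataOnWrite(true)       // warm the cache during flushes and compactions
            .setPrefetchBlocksOnOpen(true)   // prefetchOnOpen
            .build();
      }
    }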
2024-11-15T22:37:23,182 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T22:37:23,183 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/data/hbase/meta/1588230740 2024-11-15T22:37:23,184 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/data/hbase/meta/1588230740 2024-11-15T22:37:23,186 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T22:37:23,186 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T22:37:23,187 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T22:37:23,189 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T22:37:23,190 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=843802, jitterRate=0.0729503184556961}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T22:37:23,190 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T22:37:23,191 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731710243174Writing region info on filesystem at 1731710243174Initializing all the Stores at 1731710243175 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710243175Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710243175Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710243175Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710243175Cleaning up temporary data from old regions at 1731710243186 (+11 ms)Running coprocessor post-open hooks at 1731710243190 (+4 ms)Region opened successfully at 1731710243191 (+1 ms) 2024-11-15T22:37:23,193 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731710243145 2024-11-15T22:37:23,195 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T22:37:23,195 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T22:37:23,196 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e611192d6313,43179,1731710242081 2024-11-15T22:37:23,198 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e611192d6313,43179,1731710242081, state=OPEN 2024-11-15T22:37:23,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T22:37:23,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T22:37:23,419 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e611192d6313,43179,1731710242081 2024-11-15T22:37:23,419 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:37:23,419 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:37:23,428 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T22:37:23,428 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e611192d6313,43179,1731710242081 in 431 msec 2024-11-15T22:37:23,433 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T22:37:23,433 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 914 msec 2024-11-15T22:37:23,434 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:37:23,434 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T22:37:23,436 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T22:37:23,436 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e611192d6313,43179,1731710242081, seqNum=-1] 2024-11-15T22:37:23,437 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T22:37:23,438 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38503, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T22:37:23,445 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 987 msec 2024-11-15T22:37:23,445 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731710243445, completionTime=-1 2024-11-15T22:37:23,445 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T22:37:23,445 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T22:37:23,447 INFO [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T22:37:23,447 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731710303447 2024-11-15T22:37:23,448 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731710363448 2024-11-15T22:37:23,448 INFO [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-15T22:37:23,448 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34863,1731710241911-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:23,448 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34863,1731710241911-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:23,448 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34863,1731710241911-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:23,448 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e611192d6313:34863, period=300000, unit=MILLISECONDS is enabled. 
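[Editor's note] InitMetaProcedure above creates the 'default' and 'hbase' namespaces internally as part of master startup. The client-side equivalent for a user namespace looks roughly like the following; the namespace name "analytics" is a placeholder.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateNamespaceSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createNamespace(NamespaceDescriptor.create("analytics").build());
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());  // expect default, hbase, analytics
          }
        }
      }
    }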
2024-11-15T22:37:23,448 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:23,449 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:23,450 DEBUG [master/e611192d6313:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T22:37:23,453 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.299sec 2024-11-15T22:37:23,453 INFO [master/e611192d6313:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T22:37:23,453 INFO [master/e611192d6313:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T22:37:23,453 INFO [master/e611192d6313:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T22:37:23,453 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T22:37:23,454 INFO [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T22:37:23,454 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34863,1731710241911-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T22:37:23,454 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34863,1731710241911-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T22:37:23,457 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T22:37:23,457 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T22:37:23,457 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34863,1731710241911-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
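[Editor's note] The BalancerChore above runs the load balancer periodically, and a little further down the test explicitly disables it ("set balanceSwitch=false") so regions stay put while the datanode is killed. A hedged sketch of the same switch from a client, assuming an external cluster connection rather than the in-process minicluster used here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class BalancerSwitchSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // The test below does the opposite (switches the balancer off) to keep regions pinned.
          boolean previous = admin.balancerSwitch(true, true);  // enable, wait for in-flight runs
          System.out.println("balancer was previously " + (previous ? "on" : "off"));
        }
      }
    }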
2024-11-15T22:37:23,512 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e299140, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:37:23,512 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e611192d6313,34863,-1 for getting cluster id 2024-11-15T22:37:23,512 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T22:37:23,514 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cc72a11d-b5ca-4281-9deb-a111a03aae5d' 2024-11-15T22:37:23,515 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T22:37:23,515 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cc72a11d-b5ca-4281-9deb-a111a03aae5d" 2024-11-15T22:37:23,516 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7047af98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:37:23,516 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e611192d6313,34863,-1] 2024-11-15T22:37:23,516 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T22:37:23,516 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:37:23,518 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52782, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T22:37:23,519 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73a78491, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:37:23,519 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T22:37:23,520 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e611192d6313,43179,1731710242081, seqNum=-1] 2024-11-15T22:37:23,520 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T22:37:23,522 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34856, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T22:37:23,524 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e611192d6313,34863,1731710241911 2024-11-15T22:37:23,524 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:23,528 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T22:37:23,528 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T22:37:23,528 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T22:37:23,528 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:37:23,528 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:37:23,528 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:37:23,528 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T22:37:23,529 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T22:37:23,529 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1712408714, stopped=false 2024-11-15T22:37:23,529 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e611192d6313,34863,1731710241911 2024-11-15T22:37:23,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T22:37:23,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T22:37:23,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:23,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:23,544 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T22:37:23,544 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T22:37:23,544 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:37:23,544 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:37:23,544 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:37:23,544 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:37:23,545 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e611192d6313,43179,1731710242081' ***** 2024-11-15T22:37:23,545 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T22:37:23,545 INFO [RS:0;e611192d6313:43179 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T22:37:23,545 INFO [RS:0;e611192d6313:43179 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T22:37:23,545 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T22:37:23,545 INFO [RS:0;e611192d6313:43179 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T22:37:23,545 INFO [RS:0;e611192d6313:43179 {}] regionserver.HRegionServer(959): stopping server e611192d6313,43179,1731710242081 2024-11-15T22:37:23,545 INFO [RS:0;e611192d6313:43179 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T22:37:23,545 INFO [RS:0;e611192d6313:43179 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e611192d6313:43179. 2024-11-15T22:37:23,545 DEBUG [RS:0;e611192d6313:43179 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:37:23,545 DEBUG [RS:0;e611192d6313:43179 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:37:23,545 INFO [RS:0;e611192d6313:43179 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-15T22:37:23,546 INFO [RS:0;e611192d6313:43179 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T22:37:23,546 INFO [RS:0;e611192d6313:43179 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T22:37:23,546 INFO [RS:0;e611192d6313:43179 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T22:37:23,546 INFO [RS:0;e611192d6313:43179 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-15T22:37:23,546 DEBUG [RS:0;e611192d6313:43179 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-15T22:37:23,546 DEBUG [RS:0;e611192d6313:43179 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-15T22:37:23,546 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T22:37:23,546 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T22:37:23,546 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T22:37:23,546 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T22:37:23,546 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T22:37:23,546 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-15T22:37:23,563 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/data/hbase/meta/1588230740/.tmp/ns/4adaf5bc12b24496814676fb85d3c76f is 43, key is default/ns:d/1731710243439/Put/seqid=0 2024-11-15T22:37:23,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44761 is added to blk_1073741835_1011 (size=5153) 2024-11-15T22:37:23,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741835_1011 (size=5153) 2024-11-15T22:37:23,569 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/data/hbase/meta/1588230740/.tmp/ns/4adaf5bc12b24496814676fb85d3c76f 2024-11-15T22:37:23,578 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/data/hbase/meta/1588230740/.tmp/ns/4adaf5bc12b24496814676fb85d3c76f as hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/data/hbase/meta/1588230740/ns/4adaf5bc12b24496814676fb85d3c76f 2024-11-15T22:37:23,585 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/data/hbase/meta/1588230740/ns/4adaf5bc12b24496814676fb85d3c76f, entries=2, sequenceid=6, filesize=5.0 K 2024-11-15T22:37:23,587 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 40ms, sequenceid=6, compaction requested=false 2024-11-15T22:37:23,587 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T22:37:23,592 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T22:37:23,593 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T22:37:23,593 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T22:37:23,593 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731710243546Running coprocessor pre-close hooks at 1731710243546Disabling compacts and flushes for region at 1731710243546Disabling writes for close at 1731710243546Obtaining lock to block concurrent updates at 1731710243546Preparing flush snapshotting stores in 1588230740 at 1731710243546Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731710243547 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731710243548 (+1 ms)Flushing 1588230740/ns: creating writer at 1731710243548Flushing 1588230740/ns: appending metadata at 1731710243563 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731710243563Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1cccbdbf: reopening flushed file at 1731710243577 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 40ms, sequenceid=6, compaction requested=false at 1731710243587 (+10 ms)Writing region close event to WAL at 1731710243588 (+1 ms)Running coprocessor post-close hooks at 1731710243593 (+5 ms)Closed at 1731710243593 2024-11-15T22:37:23,594 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T22:37:23,640 INFO [regionserver/e611192d6313:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T22:37:23,640 INFO [regionserver/e611192d6313:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T22:37:23,746 INFO [RS:0;e611192d6313:43179 {}] regionserver.HRegionServer(976): stopping server e611192d6313,43179,1731710242081; all regions closed. 
2024-11-15T22:37:23,747 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:23,748 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:23,748 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:23,748 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:23,748 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:23,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741834_1010 (size=1152) 2024-11-15T22:37:23,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44761 is added to blk_1073741834_1010 (size=1152) 2024-11-15T22:37:23,760 DEBUG [RS:0;e611192d6313:43179 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/oldWALs 2024-11-15T22:37:23,760 INFO [RS:0;e611192d6313:43179 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e611192d6313%2C43179%2C1731710242081.meta:.meta(num 1731710243164) 2024-11-15T22:37:23,760 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:23,761 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:23,761 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:23,761 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:23,761 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:23,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44761 is added to blk_1073741833_1009 (size=93) 2024-11-15T22:37:23,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741833_1009 (size=93) 2024-11-15T22:37:23,768 DEBUG [RS:0;e611192d6313:43179 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/oldWALs 2024-11-15T22:37:23,768 INFO [RS:0;e611192d6313:43179 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e611192d6313%2C43179%2C1731710242081:(num 1731710242784) 2024-11-15T22:37:23,768 DEBUG [RS:0;e611192d6313:43179 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:37:23,768 INFO [RS:0;e611192d6313:43179 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T22:37:23,769 INFO [RS:0;e611192d6313:43179 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T22:37:23,769 INFO [RS:0;e611192d6313:43179 {}] hbase.ChoreService(370): Chore service for: regionserver/e611192d6313:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T22:37:23,769 INFO [RS:0;e611192d6313:43179 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T22:37:23,769 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T22:37:23,769 INFO [RS:0;e611192d6313:43179 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43179 2024-11-15T22:37:23,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e611192d6313,43179,1731710242081 2024-11-15T22:37:23,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T22:37:23,775 INFO [RS:0;e611192d6313:43179 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T22:37:23,786 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e611192d6313,43179,1731710242081] 2024-11-15T22:37:23,796 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e611192d6313,43179,1731710242081 already deleted, retry=false 2024-11-15T22:37:23,796 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e611192d6313,43179,1731710242081 expired; onlineServers=0 2024-11-15T22:37:23,796 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e611192d6313,34863,1731710241911' ***** 2024-11-15T22:37:23,796 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T22:37:23,796 INFO [M:0;e611192d6313:34863 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T22:37:23,796 INFO [M:0;e611192d6313:34863 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T22:37:23,797 DEBUG [M:0;e611192d6313:34863 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T22:37:23,797 DEBUG [M:0;e611192d6313:34863 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T22:37:23,797 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T22:37:23,797 DEBUG [master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710242466 {}] cleaner.HFileCleaner(306): Exit Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710242466,5,FailOnTimeoutGroup] 2024-11-15T22:37:23,797 DEBUG [master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710242466 {}] cleaner.HFileCleaner(306): Exit Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710242466,5,FailOnTimeoutGroup] 2024-11-15T22:37:23,797 INFO [M:0;e611192d6313:34863 {}] hbase.ChoreService(370): Chore service for: master/e611192d6313:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T22:37:23,797 INFO [M:0;e611192d6313:34863 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T22:37:23,798 DEBUG [M:0;e611192d6313:34863 {}] master.HMaster(1795): Stopping service threads 2024-11-15T22:37:23,798 INFO [M:0;e611192d6313:34863 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T22:37:23,798 INFO [M:0;e611192d6313:34863 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T22:37:23,798 INFO [M:0;e611192d6313:34863 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T22:37:23,798 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T22:37:23,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T22:37:23,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:23,807 DEBUG [M:0;e611192d6313:34863 {}] zookeeper.ZKUtil(347): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T22:37:23,807 WARN [M:0;e611192d6313:34863 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T22:37:23,809 INFO [M:0;e611192d6313:34863 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/.lastflushedseqids 2024-11-15T22:37:23,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44761 is added to blk_1073741836_1012 (size=99) 2024-11-15T22:37:23,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741836_1012 (size=99) 2024-11-15T22:37:23,816 INFO [M:0;e611192d6313:34863 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T22:37:23,817 INFO [M:0;e611192d6313:34863 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T22:37:23,817 DEBUG [M:0;e611192d6313:34863 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T22:37:23,817 INFO [M:0;e611192d6313:34863 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:37:23,817 DEBUG [M:0;e611192d6313:34863 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:37:23,817 DEBUG [M:0;e611192d6313:34863 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T22:37:23,817 DEBUG [M:0;e611192d6313:34863 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:37:23,817 INFO [M:0;e611192d6313:34863 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-15T22:37:23,833 DEBUG [M:0;e611192d6313:34863 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9b109af824344247bd2bbf25c5847275 is 82, key is hbase:meta,,1/info:regioninfo/1731710243196/Put/seqid=0 2024-11-15T22:37:23,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44761 is added to blk_1073741837_1013 (size=5672) 2024-11-15T22:37:23,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741837_1013 (size=5672) 2024-11-15T22:37:23,839 INFO [M:0;e611192d6313:34863 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9b109af824344247bd2bbf25c5847275 2024-11-15T22:37:23,861 DEBUG [M:0;e611192d6313:34863 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/28a5babf008a469e8afa1120398d970a is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731710243444/Put/seqid=0 2024-11-15T22:37:23,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741838_1014 (size=5275) 2024-11-15T22:37:23,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44761 is added to blk_1073741838_1014 (size=5275) 2024-11-15T22:37:23,866 INFO [M:0;e611192d6313:34863 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/28a5babf008a469e8afa1120398d970a 2024-11-15T22:37:23,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:37:23,886 INFO [RS:0;e611192d6313:43179 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T22:37:23,886 INFO [RS:0;e611192d6313:43179 {}] regionserver.HRegionServer(1031): Exiting; stopping=e611192d6313,43179,1731710242081; zookeeper connection closed. 
2024-11-15T22:37:23,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43179-0x10140a548120001, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:37:23,886 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2f4a5e9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2f4a5e9 2024-11-15T22:37:23,886 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T22:37:23,888 DEBUG [M:0;e611192d6313:34863 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ca3967742c83440c9dff5d9059f54552 is 69, key is e611192d6313,43179,1731710242081/rs:state/1731710242550/Put/seqid=0 2024-11-15T22:37:23,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44761 is added to blk_1073741839_1015 (size=5156) 2024-11-15T22:37:23,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741839_1015 (size=5156) 2024-11-15T22:37:23,894 INFO [M:0;e611192d6313:34863 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ca3967742c83440c9dff5d9059f54552 2024-11-15T22:37:23,920 DEBUG [M:0;e611192d6313:34863 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/50ac5b12fd264510a27c1ec4cd4d8181 is 52, key is load_balancer_on/state:d/1731710243526/Put/seqid=0 2024-11-15T22:37:23,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741840_1016 (size=5056) 2024-11-15T22:37:23,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44761 is added to blk_1073741840_1016 (size=5056) 2024-11-15T22:37:23,926 INFO [M:0;e611192d6313:34863 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/50ac5b12fd264510a27c1ec4cd4d8181 2024-11-15T22:37:23,933 DEBUG [M:0;e611192d6313:34863 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9b109af824344247bd2bbf25c5847275 as hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9b109af824344247bd2bbf25c5847275 2024-11-15T22:37:23,940 INFO [M:0;e611192d6313:34863 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9b109af824344247bd2bbf25c5847275, entries=8, sequenceid=29, 
filesize=5.5 K 2024-11-15T22:37:23,941 DEBUG [M:0;e611192d6313:34863 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/28a5babf008a469e8afa1120398d970a as hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/28a5babf008a469e8afa1120398d970a 2024-11-15T22:37:23,947 INFO [M:0;e611192d6313:34863 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/28a5babf008a469e8afa1120398d970a, entries=3, sequenceid=29, filesize=5.2 K 2024-11-15T22:37:23,948 DEBUG [M:0;e611192d6313:34863 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ca3967742c83440c9dff5d9059f54552 as hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ca3967742c83440c9dff5d9059f54552 2024-11-15T22:37:23,954 INFO [M:0;e611192d6313:34863 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ca3967742c83440c9dff5d9059f54552, entries=1, sequenceid=29, filesize=5.0 K 2024-11-15T22:37:23,956 DEBUG [M:0;e611192d6313:34863 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/50ac5b12fd264510a27c1ec4cd4d8181 as hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/50ac5b12fd264510a27c1ec4cd4d8181 2024-11-15T22:37:23,962 INFO [M:0;e611192d6313:34863 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36437/user/jenkins/test-data/b9e63b07-4902-1879-708c-1136984e3d52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/50ac5b12fd264510a27c1ec4cd4d8181, entries=1, sequenceid=29, filesize=4.9 K 2024-11-15T22:37:23,963 INFO [M:0;e611192d6313:34863 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=29, compaction requested=false 2024-11-15T22:37:23,965 INFO [M:0;e611192d6313:34863 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T22:37:23,965 DEBUG [M:0;e611192d6313:34863 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731710243817Disabling compacts and flushes for region at 1731710243817Disabling writes for close at 1731710243817Obtaining lock to block concurrent updates at 1731710243817Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731710243817Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731710243818 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731710243818Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731710243818Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731710243832 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731710243832Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731710243844 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731710243860 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731710243860Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731710243871 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731710243888 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731710243888Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731710243900 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731710243919 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731710243919Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30fe502c: reopening flushed file at 1731710243932 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45746f7b: reopening flushed file at 1731710243940 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a07ca06: reopening flushed file at 1731710243947 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10da8e4f: reopening flushed file at 1731710243954 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=29, compaction requested=false at 1731710243963 (+9 ms)Writing region close event to WAL at 1731710243965 (+2 ms)Closed at 1731710243965 2024-11-15T22:37:23,966 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:23,966 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:23,966 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:23,967 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:23,967 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:23,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44761 is added to blk_1073741830_1006 (size=10311) 2024-11-15T22:37:23,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39373 is added to blk_1073741830_1006 (size=10311) 2024-11-15T22:37:23,970 INFO [M:0;e611192d6313:34863 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-15T22:37:23,970 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T22:37:23,970 INFO [M:0;e611192d6313:34863 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34863 2024-11-15T22:37:23,970 INFO [M:0;e611192d6313:34863 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T22:37:24,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:37:24,032 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:37:24,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:37:24,075 INFO [M:0;e611192d6313:34863 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T22:37:24,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34863-0x10140a548120000, quorum=127.0.0.1:62160, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:37:24,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75434f63{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:37:24,108 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a2066f8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:37:24,108 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:37:24,108 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@aab268d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:37:24,108 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ce533a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/hadoop.log.dir/,STOPPED} 2024-11-15T22:37:24,109 WARN [BP-1214065719-172.17.0.3-1731710239637 heartbeating to localhost/127.0.0.1:36437 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:37:24,109 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T22:37:24,109 WARN [BP-1214065719-172.17.0.3-1731710239637 heartbeating to localhost/127.0.0.1:36437 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1214065719-172.17.0.3-1731710239637 (Datanode Uuid f94ac678-8fe6-4cb2-9d31-d9d6f0340ad1) service to localhost/127.0.0.1:36437 2024-11-15T22:37:24,109 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:37:24,110 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/cluster_29d7c4ac-b328-cc97-7a13-329239f9a74f/data/data3/current/BP-1214065719-172.17.0.3-1731710239637 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:37:24,110 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/cluster_29d7c4ac-b328-cc97-7a13-329239f9a74f/data/data4/current/BP-1214065719-172.17.0.3-1731710239637 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:37:24,110 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:37:24,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d69c419{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:37:24,114 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2f2378c9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:37:24,114 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:37:24,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7517d9e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:37:24,115 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e23c0c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/hadoop.log.dir/,STOPPED} 2024-11-15T22:37:24,116 WARN [BP-1214065719-172.17.0.3-1731710239637 heartbeating to localhost/127.0.0.1:36437 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:37:24,116 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T22:37:24,116 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:37:24,116 WARN [BP-1214065719-172.17.0.3-1731710239637 heartbeating to localhost/127.0.0.1:36437 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1214065719-172.17.0.3-1731710239637 (Datanode Uuid 535a349a-a24e-4737-9b67-6a39f7cfb824) service to localhost/127.0.0.1:36437 2024-11-15T22:37:24,116 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/cluster_29d7c4ac-b328-cc97-7a13-329239f9a74f/data/data1/current/BP-1214065719-172.17.0.3-1731710239637 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:37:24,116 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/cluster_29d7c4ac-b328-cc97-7a13-329239f9a74f/data/data2/current/BP-1214065719-172.17.0.3-1731710239637 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:37:24,117 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:37:24,122 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d95bc23{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T22:37:24,122 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78fa6004{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:37:24,122 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:37:24,123 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30a1c2a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:37:24,123 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f841e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/hadoop.log.dir/,STOPPED} 2024-11-15T22:37:24,128 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T22:37:24,144 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T22:37:24,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T22:37:24,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/hadoop.log.dir so I do NOT create it in target/test-data/4867067f-b67d-7264-ece4-5a61857a2385 2024-11-15T22:37:24,145 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7d81e28-991e-ddc4-45d4-da94ca0d27d3/hadoop.tmp.dir so I do NOT create it in target/test-data/4867067f-b67d-7264-ece4-5a61857a2385 2024-11-15T22:37:24,145 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3, deleteOnExit=true 2024-11-15T22:37:24,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T22:37:24,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/test.cache.data in system properties and HBase conf 2024-11-15T22:37:24,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T22:37:24,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.log.dir in system properties and HBase conf 2024-11-15T22:37:24,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T22:37:24,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T22:37:24,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T22:37:24,146 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-15T22:37:24,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T22:37:24,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T22:37:24,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T22:37:24,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T22:37:24,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T22:37:24,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T22:37:24,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T22:37:24,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T22:37:24,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T22:37:24,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/nfs.dump.dir in system properties and HBase conf 2024-11-15T22:37:24,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/java.io.tmpdir in system properties and HBase conf 2024-11-15T22:37:24,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T22:37:24,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T22:37:24,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T22:37:24,159 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T22:37:24,494 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-15T22:37:24,498 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:37:24,513 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:37:24,514 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:37:24,515 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:37:24,536 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:37:24,541 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:37:24,544 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:37:24,544 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:37:24,544 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T22:37:24,545 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:37:24,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65f2c48f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:37:24,546 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4478d7de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:37:24,638 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@68a89b56{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/java.io.tmpdir/jetty-localhost-37787-hadoop-hdfs-3_4_1-tests_jar-_-any-7218983395783702911/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T22:37:24,639 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@229a8eec{HTTP/1.1, (http/1.1)}{localhost:37787} 2024-11-15T22:37:24,639 INFO [Time-limited test {}] server.Server(415): Started @102797ms 2024-11-15T22:37:24,640 INFO [regionserver/e611192d6313:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T22:37:24,651 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T22:37:24,906 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:37:24,910 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:37:24,911 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:37:24,911 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:37:24,911 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T22:37:24,912 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c62a115{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:37:24,912 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b76e63f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:37:25,005 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ff95875{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/java.io.tmpdir/jetty-localhost-45263-hadoop-hdfs-3_4_1-tests_jar-_-any-4160625915241688853/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:37:25,006 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35026af9{HTTP/1.1, (http/1.1)}{localhost:45263} 2024-11-15T22:37:25,006 INFO [Time-limited test {}] server.Server(415): Started @103163ms 2024-11-15T22:37:25,007 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:37:25,037 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:37:25,041 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:37:25,042 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:37:25,042 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:37:25,042 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T22:37:25,042 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6ace9e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:37:25,043 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f68268f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:37:25,136 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5f9b72e1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/java.io.tmpdir/jetty-localhost-41505-hadoop-hdfs-3_4_1-tests_jar-_-any-9957840619791816743/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:37:25,136 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ade06e3{HTTP/1.1, (http/1.1)}{localhost:41505} 2024-11-15T22:37:25,136 INFO [Time-limited test {}] server.Server(415): Started @103294ms 2024-11-15T22:37:25,137 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:37:26,124 WARN [Thread-664 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data1/current/BP-1428576717-172.17.0.3-1731710244171/current, will proceed with Du for space computation calculation, 2024-11-15T22:37:26,124 WARN [Thread-665 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data2/current/BP-1428576717-172.17.0.3-1731710244171/current, will proceed with Du for space computation calculation, 2024-11-15T22:37:26,144 WARN [Thread-628 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T22:37:26,146 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7ec6f55f61f09812 with lease ID 0xaba062ed3263d2b9: Processing first storage report for DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39 from datanode DatanodeRegistration(127.0.0.1:33791, datanodeUuid=913c4416-9ea9-44ed-8385-e512950dee45, infoPort=46779, infoSecurePort=0, ipcPort=39447, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171) 2024-11-15T22:37:26,146 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7ec6f55f61f09812 with lease ID 0xaba062ed3263d2b9: from storage DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39 node DatanodeRegistration(127.0.0.1:33791, datanodeUuid=913c4416-9ea9-44ed-8385-e512950dee45, infoPort=46779, infoSecurePort=0, ipcPort=39447, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T22:37:26,146 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7ec6f55f61f09812 with lease ID 0xaba062ed3263d2b9: Processing first storage report for DS-e92757d5-2a39-4c27-91b6-8b44e67c811e from datanode DatanodeRegistration(127.0.0.1:33791, datanodeUuid=913c4416-9ea9-44ed-8385-e512950dee45, infoPort=46779, infoSecurePort=0, ipcPort=39447, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171) 2024-11-15T22:37:26,146 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7ec6f55f61f09812 with lease ID 0xaba062ed3263d2b9: from storage DS-e92757d5-2a39-4c27-91b6-8b44e67c811e node DatanodeRegistration(127.0.0.1:33791, datanodeUuid=913c4416-9ea9-44ed-8385-e512950dee45, infoPort=46779, infoSecurePort=0, ipcPort=39447, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:37:26,253 WARN [Thread-675 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data3/current/BP-1428576717-172.17.0.3-1731710244171/current, will proceed with Du for space computation calculation, 2024-11-15T22:37:26,253 WARN [Thread-676 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data4/current/BP-1428576717-172.17.0.3-1731710244171/current, will proceed with Du for space computation calculation, 2024-11-15T22:37:26,271 WARN [Thread-651 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T22:37:26,273 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x43702e856e8b2c05 with lease ID 0xaba062ed3263d2ba: Processing first storage report for DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3 from datanode DatanodeRegistration(127.0.0.1:44063, datanodeUuid=581fc627-72c4-42db-bdf9-451bbec9abcb, infoPort=37353, infoSecurePort=0, ipcPort=42413, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171) 2024-11-15T22:37:26,273 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x43702e856e8b2c05 with lease ID 0xaba062ed3263d2ba: from storage DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3 node DatanodeRegistration(127.0.0.1:44063, datanodeUuid=581fc627-72c4-42db-bdf9-451bbec9abcb, infoPort=37353, infoSecurePort=0, ipcPort=42413, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:37:26,273 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x43702e856e8b2c05 with lease ID 0xaba062ed3263d2ba: Processing first storage report for DS-977b2219-6b5d-4a8e-b782-9c30294d9944 from datanode DatanodeRegistration(127.0.0.1:44063, datanodeUuid=581fc627-72c4-42db-bdf9-451bbec9abcb, infoPort=37353, infoSecurePort=0, ipcPort=42413, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171) 2024-11-15T22:37:26,274 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x43702e856e8b2c05 with lease ID 0xaba062ed3263d2ba: from storage DS-977b2219-6b5d-4a8e-b782-9c30294d9944 node DatanodeRegistration(127.0.0.1:44063, datanodeUuid=581fc627-72c4-42db-bdf9-451bbec9abcb, infoPort=37353, infoSecurePort=0, ipcPort=42413, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:37:26,379 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385 2024-11-15T22:37:26,382 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/zookeeper_0, clientPort=50674, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T22:37:26,382 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50674 2024-11-15T22:37:26,383 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:26,384 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:26,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33791 is added to blk_1073741825_1001 (size=7) 2024-11-15T22:37:26,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44063 is added to blk_1073741825_1001 (size=7) 2024-11-15T22:37:26,397 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25 with version=8 2024-11-15T22:37:26,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/hbase-staging 2024-11-15T22:37:26,399 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e611192d6313:0 server-side Connection retries=45 2024-11-15T22:37:26,400 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:37:26,400 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T22:37:26,400 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T22:37:26,400 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:37:26,400 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T22:37:26,400 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T22:37:26,400 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T22:37:26,401 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46573 2024-11-15T22:37:26,403 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46573 connecting to ZooKeeper ensemble=127.0.0.1:50674 2024-11-15T22:37:26,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:465730x0, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T22:37:26,461 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46573-0x10140a559990000 connected 2024-11-15T22:37:26,544 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:26,546 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:26,548 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:37:26,549 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25, hbase.cluster.distributed=false 2024-11-15T22:37:26,550 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T22:37:26,551 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46573 2024-11-15T22:37:26,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46573 2024-11-15T22:37:26,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46573 2024-11-15T22:37:26,553 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46573 2024-11-15T22:37:26,553 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46573 2024-11-15T22:37:26,574 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e611192d6313:0 server-side Connection retries=45 2024-11-15T22:37:26,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:37:26,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T22:37:26,574 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T22:37:26,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:37:26,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T22:37:26,574 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T22:37:26,574 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T22:37:26,575 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44569 2024-11-15T22:37:26,576 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44569 connecting to ZooKeeper ensemble=127.0.0.1:50674 2024-11-15T22:37:26,577 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:26,579 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:26,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:445690x0, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T22:37:26,586 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:445690x0, quorum=127.0.0.1:50674, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:37:26,586 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44569-0x10140a559990001 connected 2024-11-15T22:37:26,586 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T22:37:26,587 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T22:37:26,587 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T22:37:26,589 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T22:37:26,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44569 2024-11-15T22:37:26,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44569 2024-11-15T22:37:26,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44569 2024-11-15T22:37:26,590 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44569 2024-11-15T22:37:26,590 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44569 2024-11-15T22:37:26,601 DEBUG [M:0;e611192d6313:46573 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e611192d6313:46573 2024-11-15T22:37:26,601 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e611192d6313,46573,1731710246399 2024-11-15T22:37:26,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:37:26,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:37:26,607 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/e611192d6313,46573,1731710246399 2024-11-15T22:37:26,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:26,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T22:37:26,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:26,618 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T22:37:26,619 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e611192d6313,46573,1731710246399 from backup master directory 2024-11-15T22:37:26,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e611192d6313,46573,1731710246399 2024-11-15T22:37:26,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:37:26,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:37:26,628 WARN [master/e611192d6313:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
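The sequence above (create an ephemeral znode under /hbase/backup-masters/e611192d6313,46573,1731710246399, win /hbase/master, then delete the backup-masters entry) is the classic ephemeral-znode election pattern. Below is a minimal sketch of that pattern using the plain Apache ZooKeeper client; the ensemble address, znode paths and server name are copied from the log, while the class name and the raw-string znode payload are illustrative only and do not reproduce HBase's actual ActiveMasterManager logic.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooDefs.Ids;

public class MasterElectionSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the test ensemble shown in the log.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50674", 30000, event -> { });

    String serverName = "e611192d6313,46573,1731710246399";   // from the log
    String backupPath = "/hbase/backup-masters/" + serverName;
    String masterPath = "/hbase/master";

    // 1. Announce this process as a backup master; the ephemeral node
    //    vanishes automatically if the process or its session dies.
    zk.create(backupPath, serverName.getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    try {
      // 2. Race for the active-master znode; exactly one contender succeeds.
      zk.create(masterPath, serverName.getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      // 3. Won the race: drop the backup-masters entry, as the log above shows.
      zk.delete(backupPath, -1);
      System.out.println("Registered as active master=" + serverName);
    } catch (KeeperException.NodeExistsException e) {
      // Someone else is active; stay in backup-masters and watch /hbase/master.
      zk.exists(masterPath, true);
    }
  }
}

Because both znodes are ephemeral, a crashed master's session expiry removes them and lets a waiting backup win the next election; the HBASE_ZNODE_FILE warning above therefore only affects how quickly that happens (the "Longer MTTR!" note), not whether it happens.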
2024-11-15T22:37:26,628 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e611192d6313,46573,1731710246399 2024-11-15T22:37:26,636 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/hbase.id] with ID: bf08d3ae-39f5-4160-9b3b-be24333a7eae 2024-11-15T22:37:26,636 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/.tmp/hbase.id 2024-11-15T22:37:26,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44063 is added to blk_1073741826_1002 (size=42) 2024-11-15T22:37:26,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33791 is added to blk_1073741826_1002 (size=42) 2024-11-15T22:37:26,644 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/.tmp/hbase.id]:[hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/hbase.id] 2024-11-15T22:37:26,658 INFO [master/e611192d6313:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:26,658 INFO [master/e611192d6313:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T22:37:26,660 INFO [master/e611192d6313:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
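The cluster ID bootstrap above follows a write-to-temp-then-rename idiom: the ID file is written under .tmp and then moved to hbase.id in a single rename, so a reader can never observe a half-written file. The following rough sketch shows that idiom with the Hadoop FileSystem API; the NameNode URI, paths and ID value are copied from the log, the class name is made up for illustration, and the ID is written as plain UTF-8 here for brevity rather than in whatever on-disk format HBase actually uses.

import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new URI("hdfs://localhost:41079"), new Configuration());

    Path rootDir = new Path("/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25");
    Path tmpId   = new Path(rootDir, ".tmp/hbase.id");
    Path realId  = new Path(rootDir, "hbase.id");
    String clusterId = "bf08d3ae-39f5-4160-9b3b-be24333a7eae";   // ID from the log

    // Write the ID to a temporary location first ...
    try (FSDataOutputStream out = fs.create(tmpId, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // ... then publish it by renaming it into place in one step.
    if (!fs.rename(tmpId, realId)) {
      throw new java.io.IOException("Failed to move " + tmpId + " to " + realId);
    }
  }
}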
2024-11-15T22:37:26,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:26,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:26,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33791 is added to blk_1073741827_1003 (size=196) 2024-11-15T22:37:26,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44063 is added to blk_1073741827_1003 (size=196) 2024-11-15T22:37:26,679 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T22:37:26,681 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T22:37:26,681 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:37:26,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33791 is added to blk_1073741828_1004 (size=1189) 2024-11-15T22:37:26,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44063 is added to blk_1073741828_1004 (size=1189) 2024-11-15T22:37:26,692 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store 2024-11-15T22:37:26,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33791 is added to blk_1073741829_1005 (size=34) 2024-11-15T22:37:26,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44063 is added to blk_1073741829_1005 (size=34) 2024-11-15T22:37:26,700 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:37:26,700 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T22:37:26,700 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:37:26,700 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:37:26,700 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T22:37:26,700 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:37:26,700 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
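The master:store descriptor dumped above can be approximated with the public descriptor builders. The sketch below mirrors the 'info' family as logged (3 versions, ROW_INDEX_V1 data block encoding, ROWCOL bloom filter, in-memory, 8 KB blocks) and leaves 'proc', 'rs' and 'state' at builder defaults, which match the single-version / ROW bloom / 64 KB values shown. The class name is illustrative, and the real master:store region is created internally by MasterRegion rather than through this client-facing API.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor store = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        // 'info' family: the attribute values printed in the log above.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc', 'rs' and 'state' keep the builder defaults, matching the log.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
    System.out.println(store);
  }
}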
2024-11-15T22:37:26,700 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731710246700Disabling compacts and flushes for region at 1731710246700Disabling writes for close at 1731710246700Writing region close event to WAL at 1731710246700Closed at 1731710246700 2024-11-15T22:37:26,701 WARN [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/.initializing 2024-11-15T22:37:26,701 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/WALs/e611192d6313,46573,1731710246399 2024-11-15T22:37:26,704 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C46573%2C1731710246399, suffix=, logDir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/WALs/e611192d6313,46573,1731710246399, archiveDir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/oldWALs, maxLogs=10 2024-11-15T22:37:26,704 INFO [master/e611192d6313:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C46573%2C1731710246399.1731710246704 2024-11-15T22:37:26,709 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/WALs/e611192d6313,46573,1731710246399/e611192d6313%2C46573%2C1731710246399.1731710246704 2024-11-15T22:37:26,710 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46779:46779),(127.0.0.1/127.0.0.1:37353:37353)] 2024-11-15T22:37:26,711 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:37:26,711 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:37:26,711 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:26,711 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:26,713 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:26,714 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T22:37:26,714 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:26,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:37:26,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:26,716 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T22:37:26,716 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:26,716 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:37:26,717 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:26,718 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T22:37:26,718 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:26,718 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:37:26,718 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:26,719 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T22:37:26,719 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:26,720 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:37:26,720 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:26,721 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:26,721 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:26,722 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:26,722 DEBUG [master/e611192d6313:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:26,723 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T22:37:26,724 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:37:26,726 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:37:26,727 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=851183, jitterRate=0.08233518898487091}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T22:37:26,728 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731710246711Initializing all the Stores at 1731710246712 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710246712Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710246712Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710246712Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710246712Cleaning up temporary data from old regions at 1731710246722 (+10 ms)Region opened successfully at 1731710246728 (+6 ms) 2024-11-15T22:37:26,728 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T22:37:26,731 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bdb92d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e611192d6313/172.17.0.3:0 2024-11-15T22:37:26,732 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T22:37:26,732 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T22:37:26,732 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T22:37:26,733 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T22:37:26,733 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T22:37:26,734 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T22:37:26,734 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T22:37:26,738 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T22:37:26,739 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T22:37:26,743 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T22:37:26,743 INFO [master/e611192d6313:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T22:37:26,744 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T22:37:26,754 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T22:37:26,754 INFO [master/e611192d6313:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T22:37:26,755 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T22:37:26,764 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T22:37:26,766 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T22:37:26,775 DEBUG 
[master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T22:37:26,777 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T22:37:26,785 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T22:37:26,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T22:37:26,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T22:37:26,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:26,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:26,797 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e611192d6313,46573,1731710246399, sessionid=0x10140a559990000, setting cluster-up flag (Was=false) 2024-11-15T22:37:26,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:26,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:26,849 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T22:37:26,851 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e611192d6313,46573,1731710246399 2024-11-15T22:37:26,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:26,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:26,902 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T22:37:26,906 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e611192d6313,46573,1731710246399 2024-11-15T22:37:26,910 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T22:37:26,913 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T22:37:26,914 INFO [master/e611192d6313:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T22:37:26,914 INFO [master/e611192d6313:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T22:37:26,914 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e611192d6313,46573,1731710246399 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T22:37:26,915 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:37:26,915 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:37:26,916 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:37:26,916 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:37:26,916 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e611192d6313:0, corePoolSize=10, maxPoolSize=10 2024-11-15T22:37:26,916 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:26,916 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e611192d6313:0, corePoolSize=2, maxPoolSize=2 2024-11-15T22:37:26,916 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e611192d6313:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T22:37:26,917 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731710276917 2024-11-15T22:37:26,917 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T22:37:26,917 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T22:37:26,917 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T22:37:26,917 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T22:37:26,917 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T22:37:26,917 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T22:37:26,917 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:26,918 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T22:37:26,918 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T22:37:26,918 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T22:37:26,918 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:37:26,918 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T22:37:26,918 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T22:37:26,918 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T22:37:26,918 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710246918,5,FailOnTimeoutGroup] 2024-11-15T22:37:26,918 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710246918,5,FailOnTimeoutGroup] 2024-11-15T22:37:26,918 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:26,919 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T22:37:26,919 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:26,919 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:26,919 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:26,919 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T22:37:26,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33791 is added to blk_1073741831_1007 (size=1321) 2024-11-15T22:37:26,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44063 is added to blk_1073741831_1007 (size=1321) 2024-11-15T22:37:26,928 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T22:37:26,929 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25 2024-11-15T22:37:26,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44063 is added to blk_1073741832_1008 (size=32) 2024-11-15T22:37:26,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33791 is added to blk_1073741832_1008 (size=32) 2024-11-15T22:37:26,937 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:37:26,938 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T22:37:26,940 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T22:37:26,940 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:26,940 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:37:26,940 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T22:37:26,942 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T22:37:26,942 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:26,942 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:37:26,942 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T22:37:26,944 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T22:37:26,944 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:26,944 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:37:26,944 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T22:37:26,946 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T22:37:26,946 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:26,946 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:37:26,946 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T22:37:26,947 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740 2024-11-15T22:37:26,947 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740 2024-11-15T22:37:26,949 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T22:37:26,949 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T22:37:26,949 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-15T22:37:26,950 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T22:37:26,952 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:37:26,953 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716422, jitterRate=-0.08902306854724884}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T22:37:26,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731710246937Initializing all the Stores at 1731710246938 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710246938Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710246938Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710246938Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710246938Cleaning up temporary data from old regions at 1731710246949 (+11 ms)Region opened successfully at 1731710246954 (+5 ms) 2024-11-15T22:37:26,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T22:37:26,954 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T22:37:26,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T22:37:26,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T22:37:26,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T22:37:26,955 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T22:37:26,955 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731710246954Disabling compacts and flushes for region at 1731710246954Disabling writes for close at 1731710246954Writing region 
close event to WAL at 1731710246955 (+1 ms)Closed at 1731710246955 2024-11-15T22:37:26,956 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:37:26,956 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T22:37:26,957 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T22:37:26,958 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T22:37:26,960 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T22:37:26,992 INFO [RS:0;e611192d6313:44569 {}] regionserver.HRegionServer(746): ClusterId : bf08d3ae-39f5-4160-9b3b-be24333a7eae 2024-11-15T22:37:26,992 DEBUG [RS:0;e611192d6313:44569 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T22:37:27,006 DEBUG [RS:0;e611192d6313:44569 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T22:37:27,006 DEBUG [RS:0;e611192d6313:44569 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T22:37:27,018 DEBUG [RS:0;e611192d6313:44569 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T22:37:27,018 DEBUG [RS:0;e611192d6313:44569 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c3e3ca9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e611192d6313/172.17.0.3:0 2024-11-15T22:37:27,029 DEBUG [RS:0;e611192d6313:44569 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e611192d6313:44569 2024-11-15T22:37:27,029 INFO [RS:0;e611192d6313:44569 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T22:37:27,029 INFO [RS:0;e611192d6313:44569 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T22:37:27,029 DEBUG [RS:0;e611192d6313:44569 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-15T22:37:27,030 INFO [RS:0;e611192d6313:44569 {}] regionserver.HRegionServer(2659): reportForDuty to master=e611192d6313,46573,1731710246399 with port=44569, startcode=1731710246574 2024-11-15T22:37:27,030 DEBUG [RS:0;e611192d6313:44569 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T22:37:27,032 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56591, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T22:37:27,033 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46573 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e611192d6313,44569,1731710246574 2024-11-15T22:37:27,033 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46573 {}] master.ServerManager(517): Registering regionserver=e611192d6313,44569,1731710246574 2024-11-15T22:37:27,035 DEBUG [RS:0;e611192d6313:44569 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25 2024-11-15T22:37:27,035 DEBUG [RS:0;e611192d6313:44569 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41079 2024-11-15T22:37:27,035 DEBUG [RS:0;e611192d6313:44569 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T22:37:27,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T22:37:27,047 DEBUG [RS:0;e611192d6313:44569 {}] zookeeper.ZKUtil(111): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e611192d6313,44569,1731710246574 2024-11-15T22:37:27,048 WARN [RS:0;e611192d6313:44569 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T22:37:27,048 INFO [RS:0;e611192d6313:44569 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:37:27,048 DEBUG [RS:0;e611192d6313:44569 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574 2024-11-15T22:37:27,048 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e611192d6313,44569,1731710246574] 2024-11-15T22:37:27,052 INFO [RS:0;e611192d6313:44569 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T22:37:27,055 INFO [RS:0;e611192d6313:44569 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T22:37:27,056 INFO [RS:0;e611192d6313:44569 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T22:37:27,056 INFO [RS:0;e611192d6313:44569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-15T22:37:27,056 INFO [RS:0;e611192d6313:44569 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T22:37:27,057 INFO [RS:0;e611192d6313:44569 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T22:37:27,058 INFO [RS:0;e611192d6313:44569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,058 DEBUG [RS:0;e611192d6313:44569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,058 DEBUG [RS:0;e611192d6313:44569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,058 DEBUG [RS:0;e611192d6313:44569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,058 DEBUG [RS:0;e611192d6313:44569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,058 DEBUG [RS:0;e611192d6313:44569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,058 DEBUG [RS:0;e611192d6313:44569 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e611192d6313:0, corePoolSize=2, maxPoolSize=2 2024-11-15T22:37:27,058 DEBUG [RS:0;e611192d6313:44569 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,058 DEBUG [RS:0;e611192d6313:44569 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,058 DEBUG [RS:0;e611192d6313:44569 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,058 DEBUG [RS:0;e611192d6313:44569 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,058 DEBUG [RS:0;e611192d6313:44569 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,058 DEBUG [RS:0;e611192d6313:44569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,058 DEBUG [RS:0;e611192d6313:44569 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e611192d6313:0, corePoolSize=3, maxPoolSize=3 2024-11-15T22:37:27,059 DEBUG [RS:0;e611192d6313:44569 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0, corePoolSize=3, maxPoolSize=3 2024-11-15T22:37:27,059 INFO [RS:0;e611192d6313:44569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-15T22:37:27,059 INFO [RS:0;e611192d6313:44569 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,059 INFO [RS:0;e611192d6313:44569 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,059 INFO [RS:0;e611192d6313:44569 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,059 INFO [RS:0;e611192d6313:44569 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,059 INFO [RS:0;e611192d6313:44569 {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,44569,1731710246574-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T22:37:27,073 INFO [RS:0;e611192d6313:44569 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T22:37:27,073 INFO [RS:0;e611192d6313:44569 {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,44569,1731710246574-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,073 INFO [RS:0;e611192d6313:44569 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,073 INFO [RS:0;e611192d6313:44569 {}] regionserver.Replication(171): e611192d6313,44569,1731710246574 started 2024-11-15T22:37:27,113 WARN [e611192d6313:46573 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-15T22:37:27,117 INFO [RS:0;e611192d6313:44569 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T22:37:27,117 INFO [RS:0;e611192d6313:44569 {}] regionserver.HRegionServer(1482): Serving as e611192d6313,44569,1731710246574, RpcServer on e611192d6313/172.17.0.3:44569, sessionid=0x10140a559990001 2024-11-15T22:37:27,117 DEBUG [RS:0;e611192d6313:44569 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T22:37:27,117 DEBUG [RS:0;e611192d6313:44569 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e611192d6313,44569,1731710246574 2024-11-15T22:37:27,117 DEBUG [RS:0;e611192d6313:44569 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e611192d6313,44569,1731710246574' 2024-11-15T22:37:27,117 DEBUG [RS:0;e611192d6313:44569 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T22:37:27,118 DEBUG [RS:0;e611192d6313:44569 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T22:37:27,118 DEBUG [RS:0;e611192d6313:44569 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T22:37:27,118 DEBUG [RS:0;e611192d6313:44569 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T22:37:27,118 DEBUG [RS:0;e611192d6313:44569 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e611192d6313,44569,1731710246574 2024-11-15T22:37:27,118 DEBUG [RS:0;e611192d6313:44569 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e611192d6313,44569,1731710246574' 2024-11-15T22:37:27,118 DEBUG [RS:0;e611192d6313:44569 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T22:37:27,118 DEBUG [RS:0;e611192d6313:44569 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T22:37:27,119 DEBUG [RS:0;e611192d6313:44569 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T22:37:27,119 INFO [RS:0;e611192d6313:44569 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T22:37:27,119 INFO [RS:0;e611192d6313:44569 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-15T22:37:27,222 INFO [RS:0;e611192d6313:44569 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C44569%2C1731710246574, suffix=, logDir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574, archiveDir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/oldWALs, maxLogs=32 2024-11-15T22:37:27,224 INFO [RS:0;e611192d6313:44569 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C44569%2C1731710246574.1731710247224 2024-11-15T22:37:27,233 INFO [RS:0;e611192d6313:44569 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710247224 2024-11-15T22:37:27,238 DEBUG [RS:0;e611192d6313:44569 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37353:37353),(127.0.0.1/127.0.0.1:46779:46779)] 2024-11-15T22:37:27,363 DEBUG [e611192d6313:46573 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T22:37:27,365 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e611192d6313,44569,1731710246574 2024-11-15T22:37:27,369 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e611192d6313,44569,1731710246574, state=OPENING 2024-11-15T22:37:27,417 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T22:37:27,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:27,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:37:27,429 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T22:37:27,429 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:37:27,429 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:37:27,429 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e611192d6313,44569,1731710246574}] 2024-11-15T22:37:27,584 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T22:37:27,590 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50055, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T22:37:27,596 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T22:37:27,596 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:37:27,598 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C44569%2C1731710246574.meta, suffix=.meta, logDir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574, archiveDir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/oldWALs, maxLogs=32 2024-11-15T22:37:27,599 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta 2024-11-15T22:37:27,605 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta 2024-11-15T22:37:27,608 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46779:46779),(127.0.0.1/127.0.0.1:37353:37353)] 2024-11-15T22:37:27,612 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:37:27,613 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T22:37:27,613 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T22:37:27,613 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-15T22:37:27,613 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T22:37:27,613 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:37:27,613 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T22:37:27,613 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T22:37:27,616 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T22:37:27,617 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T22:37:27,617 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:27,618 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:37:27,618 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T22:37:27,619 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T22:37:27,619 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:27,619 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:37:27,619 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T22:37:27,620 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T22:37:27,620 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:27,621 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:37:27,621 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T22:37:27,622 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T22:37:27,622 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:27,622 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-15T22:37:27,622 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T22:37:27,623 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740 2024-11-15T22:37:27,624 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740 2024-11-15T22:37:27,625 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T22:37:27,625 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T22:37:27,626 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T22:37:27,627 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T22:37:27,628 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=853276, jitterRate=0.08499765396118164}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T22:37:27,628 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T22:37:27,629 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731710247613Writing region info on filesystem at 1731710247613Initializing all the Stores at 1731710247614 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710247614Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710247616 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710247616Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710247616Cleaning up temporary data from old regions at 1731710247625 (+9 ms)Running coprocessor post-open hooks at 1731710247628 (+3 ms)Region opened successfully at 1731710247629 (+1 ms) 2024-11-15T22:37:27,630 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731710247584 2024-11-15T22:37:27,632 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T22:37:27,633 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T22:37:27,633 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e611192d6313,44569,1731710246574 2024-11-15T22:37:27,634 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e611192d6313,44569,1731710246574, state=OPEN 2024-11-15T22:37:27,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T22:37:27,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T22:37:27,671 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:37:27,671 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:37:27,671 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e611192d6313,44569,1731710246574 2024-11-15T22:37:27,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T22:37:27,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e611192d6313,44569,1731710246574 in 242 msec 2024-11-15T22:37:27,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T22:37:27,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 718 msec 2024-11-15T22:37:27,679 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:37:27,679 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T22:37:27,680 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T22:37:27,680 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e611192d6313,44569,1731710246574, seqNum=-1] 2024-11-15T22:37:27,681 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T22:37:27,682 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35801, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T22:37:27,688 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 775 msec 2024-11-15T22:37:27,688 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731710247688, completionTime=-1 2024-11-15T22:37:27,688 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T22:37:27,688 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T22:37:27,690 INFO [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T22:37:27,690 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731710307690 2024-11-15T22:37:27,690 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731710367690 2024-11-15T22:37:27,690 INFO [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-15T22:37:27,691 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,46573,1731710246399-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,691 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,46573,1731710246399-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,691 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,46573,1731710246399-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,691 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e611192d6313:46573, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T22:37:27,691 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,691 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,693 DEBUG [master/e611192d6313:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T22:37:27,695 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.067sec 2024-11-15T22:37:27,695 INFO [master/e611192d6313:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T22:37:27,696 INFO [master/e611192d6313:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T22:37:27,696 INFO [master/e611192d6313:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T22:37:27,696 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T22:37:27,696 INFO [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T22:37:27,696 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,46573,1731710246399-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T22:37:27,696 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,46573,1731710246399-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T22:37:27,699 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T22:37:27,699 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T22:37:27,699 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,46573,1731710246399-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
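Every "... is enabled." line above is ChoreService reporting that a periodic task has been scheduled (ClusterStatusChore, BalancerChore, HbckChore, MobFileCleanerChore, and so on). A minimal sketch of that scheduling pattern, with a made-up chore name and period purely for illustration:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public final class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Trivial Stoppable; the real servers pass the HMaster or HRegionServer itself.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // Runs chore() once a second until the stopper reports stopped.
    ScheduledChore demo = new ScheduledChore("DemoChore", stopper, 1000, 0, TimeUnit.MILLISECONDS) {
      @Override protected void chore() {
        System.out.println("DemoChore tick");
      }
    };
    ChoreService service = new ChoreService("demo");
    service.scheduleChore(demo); // this is the point where ChoreService logs the "... is enabled." line
    Thread.sleep(3000);
    service.shutdown();
  }
}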
2024-11-15T22:37:27,714 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@337c5dd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:37:27,715 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e611192d6313,46573,-1 for getting cluster id 2024-11-15T22:37:27,715 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T22:37:27,716 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'bf08d3ae-39f5-4160-9b3b-be24333a7eae' 2024-11-15T22:37:27,717 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T22:37:27,717 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "bf08d3ae-39f5-4160-9b3b-be24333a7eae" 2024-11-15T22:37:27,717 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f8038e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:37:27,717 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e611192d6313,46573,-1] 2024-11-15T22:37:27,717 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T22:37:27,718 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:37:27,719 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51684, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T22:37:27,720 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38567dd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:37:27,720 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T22:37:27,721 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e611192d6313,44569,1731710246574, seqNum=-1] 2024-11-15T22:37:27,722 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T22:37:27,723 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51766, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T22:37:27,725 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e611192d6313,46573,1731710246399 2024-11-15T22:37:27,725 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:27,728 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T22:37:27,742 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e611192d6313:0 server-side Connection retries=45 2024-11-15T22:37:27,742 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:37:27,742 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T22:37:27,742 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T22:37:27,742 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:37:27,743 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T22:37:27,743 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T22:37:27,743 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T22:37:27,743 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45181 2024-11-15T22:37:27,745 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45181 connecting to ZooKeeper ensemble=127.0.0.1:50674 2024-11-15T22:37:27,745 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:27,747 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:37:27,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:451810x0, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T22:37:27,765 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:451810x0, quorum=127.0.0.1:50674, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-15T22:37:27,765 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-15T22:37:27,765 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45181-0x10140a559990002 connected 2024-11-15T22:37:27,766 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T22:37:27,766 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T22:37:27,767 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:45181-0x10140a559990002, quorum=127.0.0.1:50674, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T22:37:27,768 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45181-0x10140a559990002, quorum=127.0.0.1:50674, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T22:37:27,772 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45181 2024-11-15T22:37:27,772 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45181 2024-11-15T22:37:27,773 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45181 2024-11-15T22:37:27,774 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45181 2024-11-15T22:37:27,774 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45181 2024-11-15T22:37:27,775 INFO [RS:1;e611192d6313:45181 {}] regionserver.HRegionServer(746): ClusterId : bf08d3ae-39f5-4160-9b3b-be24333a7eae 2024-11-15T22:37:27,775 DEBUG [RS:1;e611192d6313:45181 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T22:37:27,787 DEBUG [RS:1;e611192d6313:45181 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T22:37:27,787 DEBUG [RS:1;e611192d6313:45181 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T22:37:27,797 DEBUG [RS:1;e611192d6313:45181 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T22:37:27,798 DEBUG [RS:1;e611192d6313:45181 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fe73e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e611192d6313/172.17.0.3:0 2024-11-15T22:37:27,810 DEBUG [RS:1;e611192d6313:45181 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;e611192d6313:45181 2024-11-15T22:37:27,810 INFO [RS:1;e611192d6313:45181 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T22:37:27,810 INFO [RS:1;e611192d6313:45181 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T22:37:27,810 DEBUG [RS:1;e611192d6313:45181 {}] regionserver.HRegionServer(832): About to register with Master. 
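The "Minicluster is up; activeMaster=e611192d6313,46573,1731710246399" line a little further up marks the end of HBaseTestingUtil's cluster bootstrap, and the RS:1 startup that begins here is a second region server being added to that same minicluster. In test code the bootstrap usually looks roughly like the following; the method names are assumed from the utility class named in the log, not verified against this exact test:

import org.apache.hadoop.hbase.HBaseTestingUtil;

public final class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil testUtil = new HBaseTestingUtil();
    // Starts ZooKeeper, a mini HDFS cluster, one master and one region server,
    // producing bootstrap logging of the kind seen above.
    testUtil.startMiniCluster(1);
    try {
      // test body goes here; additional region servers such as RS:1 are typically
      // added afterwards through the running mini cluster object.
    } finally {
      testUtil.shutdownMiniCluster();
    }
  }
}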
2024-11-15T22:37:27,811 INFO [RS:1;e611192d6313:45181 {}] regionserver.HRegionServer(2659): reportForDuty to master=e611192d6313,46573,1731710246399 with port=45181, startcode=1731710247742 2024-11-15T22:37:27,811 DEBUG [RS:1;e611192d6313:45181 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T22:37:27,812 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54201, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T22:37:27,813 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46573 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e611192d6313,45181,1731710247742 2024-11-15T22:37:27,813 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46573 {}] master.ServerManager(517): Registering regionserver=e611192d6313,45181,1731710247742 2024-11-15T22:37:27,815 DEBUG [RS:1;e611192d6313:45181 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25 2024-11-15T22:37:27,815 DEBUG [RS:1;e611192d6313:45181 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41079 2024-11-15T22:37:27,815 DEBUG [RS:1;e611192d6313:45181 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T22:37:27,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T22:37:27,828 DEBUG [RS:1;e611192d6313:45181 {}] zookeeper.ZKUtil(111): regionserver:45181-0x10140a559990002, quorum=127.0.0.1:50674, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e611192d6313,45181,1731710247742 2024-11-15T22:37:27,828 WARN [RS:1;e611192d6313:45181 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T22:37:27,828 INFO [RS:1;e611192d6313:45181 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:37:27,828 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e611192d6313,45181,1731710247742] 2024-11-15T22:37:27,828 DEBUG [RS:1;e611192d6313:45181 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742 2024-11-15T22:37:27,832 INFO [RS:1;e611192d6313:45181 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T22:37:27,835 INFO [RS:1;e611192d6313:45181 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T22:37:27,835 INFO [RS:1;e611192d6313:45181 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T22:37:27,835 INFO [RS:1;e611192d6313:45181 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-15T22:37:27,836 INFO [RS:1;e611192d6313:45181 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T22:37:27,837 INFO [RS:1;e611192d6313:45181 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T22:37:27,837 INFO [RS:1;e611192d6313:45181 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,837 DEBUG [RS:1;e611192d6313:45181 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,837 DEBUG [RS:1;e611192d6313:45181 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,837 DEBUG [RS:1;e611192d6313:45181 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,837 DEBUG [RS:1;e611192d6313:45181 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,837 DEBUG [RS:1;e611192d6313:45181 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,837 DEBUG [RS:1;e611192d6313:45181 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e611192d6313:0, corePoolSize=2, maxPoolSize=2 2024-11-15T22:37:27,837 DEBUG [RS:1;e611192d6313:45181 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,838 DEBUG [RS:1;e611192d6313:45181 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,838 DEBUG [RS:1;e611192d6313:45181 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,838 DEBUG [RS:1;e611192d6313:45181 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,838 DEBUG [RS:1;e611192d6313:45181 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,838 DEBUG [RS:1;e611192d6313:45181 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:37:27,838 DEBUG [RS:1;e611192d6313:45181 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e611192d6313:0, corePoolSize=3, maxPoolSize=3 2024-11-15T22:37:27,838 DEBUG [RS:1;e611192d6313:45181 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0, corePoolSize=3, maxPoolSize=3 2024-11-15T22:37:27,839 INFO [RS:1;e611192d6313:45181 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-15T22:37:27,839 INFO [RS:1;e611192d6313:45181 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,839 INFO [RS:1;e611192d6313:45181 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,839 INFO [RS:1;e611192d6313:45181 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,839 INFO [RS:1;e611192d6313:45181 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,839 INFO [RS:1;e611192d6313:45181 {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,45181,1731710247742-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T22:37:27,853 INFO [RS:1;e611192d6313:45181 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T22:37:27,853 INFO [RS:1;e611192d6313:45181 {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,45181,1731710247742-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,853 INFO [RS:1;e611192d6313:45181 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,853 INFO [RS:1;e611192d6313:45181 {}] regionserver.Replication(171): e611192d6313,45181,1731710247742 started 2024-11-15T22:37:27,864 INFO [RS:1;e611192d6313:45181 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:37:27,865 INFO [RS:1;e611192d6313:45181 {}] regionserver.HRegionServer(1482): Serving as e611192d6313,45181,1731710247742, RpcServer on e611192d6313/172.17.0.3:45181, sessionid=0x10140a559990002 2024-11-15T22:37:27,865 DEBUG [RS:1;e611192d6313:45181 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T22:37:27,865 DEBUG [RS:1;e611192d6313:45181 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e611192d6313,45181,1731710247742 2024-11-15T22:37:27,865 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;e611192d6313:45181,5,FailOnTimeoutGroup] 2024-11-15T22:37:27,865 DEBUG [RS:1;e611192d6313:45181 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e611192d6313,45181,1731710247742' 2024-11-15T22:37:27,865 DEBUG [RS:1;e611192d6313:45181 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T22:37:27,865 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-15T22:37:27,865 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T22:37:27,866 DEBUG [RS:1;e611192d6313:45181 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T22:37:27,866 DEBUG [RS:1;e611192d6313:45181 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T22:37:27,866 DEBUG [RS:1;e611192d6313:45181 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T22:37:27,866 DEBUG [RS:1;e611192d6313:45181 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
e611192d6313,45181,1731710247742 2024-11-15T22:37:27,866 DEBUG [RS:1;e611192d6313:45181 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e611192d6313,45181,1731710247742' 2024-11-15T22:37:27,866 DEBUG [RS:1;e611192d6313:45181 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T22:37:27,866 DEBUG [RS:1;e611192d6313:45181 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T22:37:27,867 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is e611192d6313,46573,1731710246399 2024-11-15T22:37:27,867 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2ff67fa3 2024-11-15T22:37:27,867 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T22:37:27,867 DEBUG [RS:1;e611192d6313:45181 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T22:37:27,867 INFO [RS:1;e611192d6313:45181 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T22:37:27,867 INFO [RS:1;e611192d6313:45181 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T22:37:27,869 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51696, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T22:37:27,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46573 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T22:37:27,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46573 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
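The two TableDescriptorChecker warnings just above name the keys and values involved: "hbase.hregion.max.filesize" at 786432 and "hbase.hregion.memstore.flush.size" at 8192. A log-rolling test wants exactly such tiny regions and frequent flushes, so those values are lowered in the test Configuration before the table is created. A hypothetical sketch using the values visible in the warnings (whether this run also relaxes hbase.table.sanity.checks is an assumption; getting warnings rather than a rejected create suggests it):

import org.apache.hadoop.conf.Configuration;

public final class SmallRegionConfigSketch {
  public static void apply(Configuration conf) {
    // Values copied from the warnings above; deliberately far below production defaults.
    conf.setLong("hbase.hregion.max.filesize", 786432L);       // store size at which a split is requested
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L);  // memstore flush threshold
    // With sanity checks relaxed the master only warns, as seen above, instead of rejecting the table.
    conf.setBoolean("hbase.table.sanity.checks", false);
  }
}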
2024-11-15T22:37:27,869 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46573 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T22:37:27,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46573 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-15T22:37:27,872 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T22:37:27,872 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:27,872 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46573 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-15T22:37:27,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46573 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T22:37:27,873 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T22:37:27,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33791 is added to blk_1073741835_1011 (size=393) 2024-11-15T22:37:27,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44063 is added to blk_1073741835_1011 (size=393) 2024-11-15T22:37:27,882 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4876379c8ce36c0ab9ae28b3c1e67fd7, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25 2024-11-15T22:37:27,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33791 is added to blk_1073741836_1012 (size=76) 2024-11-15T22:37:27,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44063 is added to blk_1073741836_1012 (size=76) 2024-11-15T22:37:27,889 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:37:27,890 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 4876379c8ce36c0ab9ae28b3c1e67fd7, disabling compactions & flushes 2024-11-15T22:37:27,890 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. 2024-11-15T22:37:27,890 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. 2024-11-15T22:37:27,890 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. after waiting 0 ms 2024-11-15T22:37:27,890 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. 2024-11-15T22:37:27,890 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. 2024-11-15T22:37:27,890 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4876379c8ce36c0ab9ae28b3c1e67fd7: Waiting for close lock at 1731710247890Disabling compacts and flushes for region at 1731710247890Disabling writes for close at 1731710247890Writing region close event to WAL at 1731710247890Closed at 1731710247890 2024-11-15T22:37:27,891 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T22:37:27,892 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731710247892"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731710247892"}]},"ts":"1731710247892"} 2024-11-15T22:37:27,895 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
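The region creation and meta updates above were all triggered by the client request logged at the start of this step: Client=jenkins create 'TestLogRolling-testLogRollOnDatanodeDeath' with a single 'info' family and VERSIONS => '1'. The test's own helper is not shown in the log; a plain Admin-API equivalent, with connection setup assumed, would look roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // One column family "info" with a single version, matching the descriptor in the log.
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .build())
          .build());
    }
  }
}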
2024-11-15T22:37:27,896 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T22:37:27,896 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731710247896"}]},"ts":"1731710247896"} 2024-11-15T22:37:27,899 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-15T22:37:27,899 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4876379c8ce36c0ab9ae28b3c1e67fd7, ASSIGN}] 2024-11-15T22:37:27,900 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4876379c8ce36c0ab9ae28b3c1e67fd7, ASSIGN 2024-11-15T22:37:27,902 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4876379c8ce36c0ab9ae28b3c1e67fd7, ASSIGN; state=OFFLINE, location=e611192d6313,44569,1731710246574; forceNewPlan=false, retain=false 2024-11-15T22:37:27,972 INFO [RS:1;e611192d6313:45181 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C45181%2C1731710247742, suffix=, logDir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742, archiveDir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/oldWALs, maxLogs=32 2024-11-15T22:37:27,973 INFO [RS:1;e611192d6313:45181 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C45181%2C1731710247742.1731710247973 2024-11-15T22:37:27,983 INFO [RS:1;e611192d6313:45181 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 2024-11-15T22:37:27,984 DEBUG [RS:1;e611192d6313:45181 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46779:46779),(127.0.0.1/127.0.0.1:37353:37353)] 2024-11-15T22:37:28,053 INFO [e611192d6313:46573 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
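The "WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=..., maxLogs=32" line above, together with the FSHLogProvider choice logged earlier by WALFactory, is configuration-driven. A hedged sketch of the usual knobs; the key names are the commonly documented ones, not read back from this run's config:

import org.apache.hadoop.conf.Configuration;

public final class WalConfigSketch {
  public static void apply(Configuration conf) {
    conf.set("hbase.wal.provider", "filesystem");                  // selects FSHLogProvider
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);  // rollsize = blocksize * multiplier, i.e. 128 MB here
    conf.setInt("hbase.regionserver.maxlogs", 32);
  }
}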
2024-11-15T22:37:28,053 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4876379c8ce36c0ab9ae28b3c1e67fd7, regionState=OPENING, regionLocation=e611192d6313,44569,1731710246574 2024-11-15T22:37:28,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4876379c8ce36c0ab9ae28b3c1e67fd7, ASSIGN because future has completed 2024-11-15T22:37:28,059 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4876379c8ce36c0ab9ae28b3c1e67fd7, server=e611192d6313,44569,1731710246574}] 2024-11-15T22:37:28,224 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. 2024-11-15T22:37:28,224 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4876379c8ce36c0ab9ae28b3c1e67fd7, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:37:28,225 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:37:28,225 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:37:28,225 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:37:28,225 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:37:28,227 INFO [StoreOpener-4876379c8ce36c0ab9ae28b3c1e67fd7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:37:28,229 INFO [StoreOpener-4876379c8ce36c0ab9ae28b3c1e67fd7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4876379c8ce36c0ab9ae28b3c1e67fd7 columnFamilyName info 2024-11-15T22:37:28,230 DEBUG [StoreOpener-4876379c8ce36c0ab9ae28b3c1e67fd7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:37:28,230 INFO [StoreOpener-4876379c8ce36c0ab9ae28b3c1e67fd7-1 {}] regionserver.HStore(327): Store=4876379c8ce36c0ab9ae28b3c1e67fd7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:37:28,230 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:37:28,231 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:37:28,232 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:37:28,233 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:37:28,233 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:37:28,235 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:37:28,238 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:37:28,239 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4876379c8ce36c0ab9ae28b3c1e67fd7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=780064, jitterRate=-0.008097484707832336}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T22:37:28,239 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:37:28,239 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4876379c8ce36c0ab9ae28b3c1e67fd7: Running coprocessor pre-open hook at 1731710248225Writing region info on filesystem at 1731710248225Initializing all the Stores at 1731710248227 (+2 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710248227Cleaning up temporary data from old regions at 1731710248233 (+6 ms)Running coprocessor post-open hooks at 1731710248239 (+6 ms)Region opened successfully at 1731710248239 2024-11-15T22:37:28,241 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7., pid=6, masterSystemTime=1731710248215 2024-11-15T22:37:28,244 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. 2024-11-15T22:37:28,244 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. 2024-11-15T22:37:28,245 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4876379c8ce36c0ab9ae28b3c1e67fd7, regionState=OPEN, openSeqNum=2, regionLocation=e611192d6313,44569,1731710246574 2024-11-15T22:37:28,248 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4876379c8ce36c0ab9ae28b3c1e67fd7, server=e611192d6313,44569,1731710246574 because future has completed 2024-11-15T22:37:28,254 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T22:37:28,254 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4876379c8ce36c0ab9ae28b3c1e67fd7, server=e611192d6313,44569,1731710246574 in 191 msec 2024-11-15T22:37:28,257 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T22:37:28,257 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4876379c8ce36c0ab9ae28b3c1e67fd7, ASSIGN in 355 msec 2024-11-15T22:37:28,258 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T22:37:28,258 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731710248258"}]},"ts":"1731710248258"} 2024-11-15T22:37:28,260 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-15T22:37:28,262 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T22:37:28,264 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 393 msec 2024-11-15T22:37:28,402 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-15T22:37:28,402 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-15T22:37:28,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-15T22:37:33,118 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T22:37:33,120 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:37:33,145 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:37:33,148 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:37:33,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:37:33,159 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-15T22:37:37,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46573 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T22:37:37,949 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-15T22:37:37,949 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-15T22:37:37,953 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-15T22:37:37,953 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. 2024-11-15T22:37:37,965 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:37:37,967 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:37:37,968 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:37:37,968 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:37:37,968 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T22:37:37,969 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ca82099{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:37:37,969 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ed35b1e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:37:38,063 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1196c8fc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/java.io.tmpdir/jetty-localhost-39989-hadoop-hdfs-3_4_1-tests_jar-_-any-5439045191167771753/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:37:38,064 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5580c33e{HTTP/1.1, (http/1.1)}{localhost:39989} 2024-11-15T22:37:38,064 INFO [Time-limited test {}] server.Server(415): Started @116222ms 2024-11-15T22:37:38,065 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:37:38,191 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:37:38,194 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:37:38,196 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:37:38,196 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:37:38,196 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T22:37:38,196 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fe58b15{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:37:38,197 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5455501c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:37:38,290 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5e08dd81{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/java.io.tmpdir/jetty-localhost-39129-hadoop-hdfs-3_4_1-tests_jar-_-any-10272807014772467660/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:37:38,290 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@178f342a{HTTP/1.1, (http/1.1)}{localhost:39129} 2024-11-15T22:37:38,291 INFO [Time-limited test {}] server.Server(415): Started @116448ms 2024-11-15T22:37:38,292 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:37:38,370 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:37:38,377 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:37:38,378 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:37:38,378 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:37:38,378 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T22:37:38,378 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@792fa80c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:37:38,379 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d4ec789{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:37:38,514 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5dd0b56c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/java.io.tmpdir/jetty-localhost-36487-hadoop-hdfs-3_4_1-tests_jar-_-any-16789730171435642463/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:37:38,514 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3740407e{HTTP/1.1, (http/1.1)}{localhost:36487} 2024-11-15T22:37:38,514 INFO [Time-limited test {}] server.Server(415): Started @116672ms 2024-11-15T22:37:38,515 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
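The three Jetty 'datanode' web contexts brought up above (ports 39989, 39129 and 36487) correspond to extra DataNodes being added to the mini DFS cluster mid-test, which is why new data directories (data5 through data10) and fresh block reports appear immediately afterwards. A rough sketch of how that is usually done against MiniDFSCluster; treat the exact overload as an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public final class AddDataNodesSketch {
  public static void addThree(MiniDFSCluster dfsCluster, Configuration conf) throws Exception {
    // Each new DataNode starts its own Jetty endpoint like the ones logged above
    // and then sends the "BLOCK* processReport" storage reports that follow.
    dfsCluster.startDataNodes(conf, 3, true, null, null);
    dfsCluster.waitActive();
  }
}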
2024-11-15T22:37:39,480 WARN [Thread-861 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data5/current/BP-1428576717-172.17.0.3-1731710244171/current, will proceed with Du for space computation calculation, 2024-11-15T22:37:39,480 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data6/current/BP-1428576717-172.17.0.3-1731710244171/current, will proceed with Du for space computation calculation, 2024-11-15T22:37:39,480 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data7/current/BP-1428576717-172.17.0.3-1731710244171/current, will proceed with Du for space computation calculation, 2024-11-15T22:37:39,481 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data8/current/BP-1428576717-172.17.0.3-1731710244171/current, will proceed with Du for space computation calculation, 2024-11-15T22:37:39,499 WARN [Thread-822 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T22:37:39,501 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x19a28cde1ea73e00 with lease ID 0xaba062ed3263d2bb: Processing first storage report for DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224 from datanode DatanodeRegistration(127.0.0.1:41737, datanodeUuid=e4df1d4b-07c5-4b2d-b4fb-055f7e699967, infoPort=34241, infoSecurePort=0, ipcPort=46305, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171) 2024-11-15T22:37:39,501 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x19a28cde1ea73e00 with lease ID 0xaba062ed3263d2bb: from storage DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224 node DatanodeRegistration(127.0.0.1:41737, datanodeUuid=e4df1d4b-07c5-4b2d-b4fb-055f7e699967, infoPort=34241, infoSecurePort=0, ipcPort=46305, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:37:39,501 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x19a28cde1ea73e00 with lease ID 0xaba062ed3263d2bb: Processing first storage report for DS-27e25397-30d7-4263-85c8-dae1907a9327 from datanode DatanodeRegistration(127.0.0.1:41737, datanodeUuid=e4df1d4b-07c5-4b2d-b4fb-055f7e699967, infoPort=34241, infoSecurePort=0, ipcPort=46305, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171) 2024-11-15T22:37:39,501 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x19a28cde1ea73e00 with lease ID 0xaba062ed3263d2bb: from storage DS-27e25397-30d7-4263-85c8-dae1907a9327 node DatanodeRegistration(127.0.0.1:41737, datanodeUuid=e4df1d4b-07c5-4b2d-b4fb-055f7e699967, infoPort=34241, 
infoSecurePort=0, ipcPort=46305, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:37:39,502 WARN [Thread-800 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T22:37:39,504 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x92a8c4d7feeb1620 with lease ID 0xaba062ed3263d2bc: Processing first storage report for DS-088677f4-f0db-4d2f-8741-897fa2c7056e from datanode DatanodeRegistration(127.0.0.1:35127, datanodeUuid=c1b6fc5f-bb2e-4afe-bbcf-2c93c2a62fac, infoPort=36625, infoSecurePort=0, ipcPort=35565, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171) 2024-11-15T22:37:39,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x92a8c4d7feeb1620 with lease ID 0xaba062ed3263d2bc: from storage DS-088677f4-f0db-4d2f-8741-897fa2c7056e node DatanodeRegistration(127.0.0.1:35127, datanodeUuid=c1b6fc5f-bb2e-4afe-bbcf-2c93c2a62fac, infoPort=36625, infoSecurePort=0, ipcPort=35565, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:37:39,504 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x92a8c4d7feeb1620 with lease ID 0xaba062ed3263d2bc: Processing first storage report for DS-b50cdaed-0b64-4810-b7d5-47c60135b993 from datanode DatanodeRegistration(127.0.0.1:35127, datanodeUuid=c1b6fc5f-bb2e-4afe-bbcf-2c93c2a62fac, infoPort=36625, infoSecurePort=0, ipcPort=35565, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171) 2024-11-15T22:37:39,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x92a8c4d7feeb1620 with lease ID 0xaba062ed3263d2bc: from storage DS-b50cdaed-0b64-4810-b7d5-47c60135b993 node DatanodeRegistration(127.0.0.1:35127, datanodeUuid=c1b6fc5f-bb2e-4afe-bbcf-2c93c2a62fac, infoPort=36625, infoSecurePort=0, ipcPort=35565, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:37:39,603 WARN [Thread-881 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data9/current/BP-1428576717-172.17.0.3-1731710244171/current, will proceed with Du for space computation calculation, 2024-11-15T22:37:39,603 WARN [Thread-882 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data10/current/BP-1428576717-172.17.0.3-1731710244171/current, will proceed with Du for space computation calculation, 2024-11-15T22:37:39,621 WARN [Thread-844 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T22:37:39,623 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc08e9e4ff23b2b46 with lease ID 0xaba062ed3263d2bd: Processing first storage report for DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c from datanode DatanodeRegistration(127.0.0.1:40635, datanodeUuid=9535c085-55f4-456b-b220-53fa517075d9, infoPort=38631, infoSecurePort=0, ipcPort=37191, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171) 2024-11-15T22:37:39,623 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc08e9e4ff23b2b46 with lease ID 0xaba062ed3263d2bd: from storage DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c node DatanodeRegistration(127.0.0.1:40635, datanodeUuid=9535c085-55f4-456b-b220-53fa517075d9, infoPort=38631, infoSecurePort=0, ipcPort=37191, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:37:39,624 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc08e9e4ff23b2b46 with lease ID 0xaba062ed3263d2bd: Processing first storage report for DS-b795b885-e0e7-4510-8477-20e5a6d08cfc from datanode DatanodeRegistration(127.0.0.1:40635, datanodeUuid=9535c085-55f4-456b-b220-53fa517075d9, infoPort=38631, infoSecurePort=0, ipcPort=37191, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171) 2024-11-15T22:37:39,624 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc08e9e4ff23b2b46 with lease ID 0xaba062ed3263d2bd: from storage DS-b795b885-e0e7-4510-8477-20e5a6d08cfc node DatanodeRegistration(127.0.0.1:40635, datanodeUuid=9535c085-55f4-456b-b220-53fa517075d9, infoPort=38631, infoSecurePort=0, ipcPort=37191, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:37:39,643 WARN [ResponseProcessor for block BP-1428576717-172.17.0.3-1731710244171:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1428576717-172.17.0.3-1731710244171:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:39,643 WARN [ResponseProcessor for block BP-1428576717-172.17.0.3-1731710244171:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1428576717-172.17.0.3-1731710244171:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-1428576717-172.17.0.3-1731710244171:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:37:39,643 WARN [ResponseProcessor for block BP-1428576717-172.17.0.3-1731710244171:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1428576717-172.17.0.3-1731710244171:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1428576717-172.17.0.3-1731710244171:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:39,644 WARN [DataStreamer for file /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 block BP-1428576717-172.17.0.3-1731710244171:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK], DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]) is bad. 2024-11-15T22:37:39,644 WARN [DataStreamer for file /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta block BP-1428576717-172.17.0.3-1731710244171:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK], DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]) is bad. 2024-11-15T22:37:39,644 WARN [ResponseProcessor for block BP-1428576717-172.17.0.3-1731710244171:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1428576717-172.17.0.3-1731710244171:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1428576717-172.17.0.3-1731710244171:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:39,644 WARN [DataStreamer for file /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710247224 block BP-1428576717-172.17.0.3-1731710244171:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK], DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]) is bad. 
2024-11-15T22:37:39,645 WARN [DataStreamer for file /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/WALs/e611192d6313,46573,1731710246399/e611192d6313%2C46573%2C1731710246399.1731710246704 block BP-1428576717-172.17.0.3-1731710244171:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK], DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]) is bad. 2024-11-15T22:37:39,645 WARN [PacketResponder: BP-1428576717-172.17.0.3-1731710244171:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44063] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:39,645 WARN [PacketResponder: BP-1428576717-172.17.0.3-1731710244171:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44063] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:39,645 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:35648 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33791:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35648 dst: /127.0.0.1:33791 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:39,645 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:56946 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44063:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56946 dst: /127.0.0.1:44063 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:39,646 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_827244624_22 at /127.0.0.1:35622 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33791:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35622 dst: /127.0.0.1:33791 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:39,645 WARN [PacketResponder: BP-1428576717-172.17.0.3-1731710244171:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44063] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] 
at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:39,646 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:56962 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44063:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56962 dst: /127.0.0.1:44063 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:39,647 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:35658 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33791:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35658 dst: /127.0.0.1:33791 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:39,647 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1380831495_22 at /127.0.0.1:35686 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:33791:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35686 dst: /127.0.0.1:33791 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:39,648 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5f9b72e1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:37:39,648 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1380831495_22 at /127.0.0.1:56992 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:44063:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56992 dst: /127.0.0.1:44063 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:39,648 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_827244624_22 at /127.0.0.1:56916 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44063:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56916 dst: /127.0.0.1:44063 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:39,649 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ade06e3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:37:39,649 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:37:39,649 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f68268f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:37:39,649 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6ace9e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.log.dir/,STOPPED} 2024-11-15T22:37:39,650 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T22:37:39,650 WARN [BP-1428576717-172.17.0.3-1731710244171 heartbeating to localhost/127.0.0.1:41079 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:37:39,651 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:37:39,651 WARN [BP-1428576717-172.17.0.3-1731710244171 heartbeating to localhost/127.0.0.1:41079 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1428576717-172.17.0.3-1731710244171 (Datanode Uuid 581fc627-72c4-42db-bdf9-451bbec9abcb) service to localhost/127.0.0.1:41079 2024-11-15T22:37:39,651 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data3/current/BP-1428576717-172.17.0.3-1731710244171 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:37:39,652 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data4/current/BP-1428576717-172.17.0.3-1731710244171 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:37:39,653 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:37:39,653 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@79f9ef95 {}] datanode.DataXceiver(331): 127.0.0.1:33791:DataXceiver error processing unknown operation src: /127.0.0.1:49732 dst: /127.0.0.1:33791 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:39,653 WARN [DataStreamer for file /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 block BP-1428576717-172.17.0.3-1731710244171:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:39,654 WARN [DataStreamer for file /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/WALs/e611192d6313,46573,1731710246399/e611192d6313%2C46573%2C1731710246399.1731710246704 block BP-1428576717-172.17.0.3-1731710244171:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:39,654 WARN [DataStreamer for file /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta block BP-1428576717-172.17.0.3-1731710244171:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:39,654 WARN [DataStreamer for file /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710247224 block BP-1428576717-172.17.0.3-1731710244171:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:37:39,661 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ff95875{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:37:39,661 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35026af9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:37:39,661 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:37:39,661 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b76e63f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:37:39,662 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c62a115{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.log.dir/,STOPPED} 2024-11-15T22:37:39,663 WARN [BP-1428576717-172.17.0.3-1731710244171 heartbeating to localhost/127.0.0.1:41079 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:37:39,663 WARN [BP-1428576717-172.17.0.3-1731710244171 heartbeating to localhost/127.0.0.1:41079 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1428576717-172.17.0.3-1731710244171 (Datanode Uuid 913c4416-9ea9-44ed-8385-e512950dee45) service to localhost/127.0.0.1:41079 2024-11-15T22:37:39,663 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T22:37:39,663 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:37:39,663 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data1/current/BP-1428576717-172.17.0.3-1731710244171 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:37:39,663 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data2/current/BP-1428576717-172.17.0.3-1731710244171 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:37:39,664 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:37:39,669 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7., hostname=e611192d6313,44569,1731710246574, seqNum=2] 2024-11-15T22:37:39,671 ERROR [FSHLog-0-hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25-prefix:e611192d6313,44569,1731710246574 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:39,671 WARN [FSHLog-0-hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25-prefix:e611192d6313,44569,1731710246574 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:37:39,671 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e611192d6313%2C44569%2C1731710246574:(num 1731710247224) roll requested 2024-11-15T22:37:39,671 INFO [regionserver/e611192d6313:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C44569%2C1731710246574.1731710259671 2024-11-15T22:37:39,674 WARN [Thread-893 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:39,674 WARN [Thread-893 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK], DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]) is bad. 2024-11-15T22:37:39,674 WARN [Thread-893 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741838_1018 2024-11-15T22:37:39,676 WARN [Thread-893 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK] 2024-11-15T22:37:39,683 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:39,683 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:39,684 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:39,684 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:39,684 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:39,684 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710247224 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710259671 2024-11-15T22:37:39,686 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:39,686 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:39,687 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34241:34241),(127.0.0.1/127.0.0.1:36625:36625)] 2024-11-15T22:37:39,687 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710247224 is not closed yet, will try archiving it next time 2024-11-15T22:37:39,687 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-15T22:37:39,688 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-15T22:37:39,688 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710247224 2024-11-15T22:37:39,691 WARN [IPC Server handler 0 on default port 41079 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710247224 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-11-15T22:37:39,693 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710247224 after 4ms 2024-11-15T22:37:39,840 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:40,683 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:41,687 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:41,689 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710259671 2024-11-15T22:37:41,689 WARN [ResponseProcessor for block BP-1428576717-172.17.0.3-1731710244171:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1428576717-172.17.0.3-1731710244171:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:37:41,690 WARN [DataStreamer for file /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710259671 block BP-1428576717-172.17.0.3-1731710244171:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK], DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK]) is bad. 2024-11-15T22:37:41,691 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:58092 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:41737:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58092 dst: /127.0.0.1:41737 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T22:37:41,691 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:34626 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:35127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34626 dst: /127.0.0.1:35127 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:41,759 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5e08dd81{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:37:41,759 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@178f342a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:37:41,759 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:37:41,760 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5455501c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:37:41,760 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fe58b15{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.log.dir/,STOPPED} 2024-11-15T22:37:41,761 WARN [BP-1428576717-172.17.0.3-1731710244171 heartbeating to localhost/127.0.0.1:41079 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:37:41,761 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T22:37:41,762 WARN [BP-1428576717-172.17.0.3-1731710244171 heartbeating to localhost/127.0.0.1:41079 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1428576717-172.17.0.3-1731710244171 (Datanode Uuid e4df1d4b-07c5-4b2d-b4fb-055f7e699967) service to localhost/127.0.0.1:41079 2024-11-15T22:37:41,762 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:37:41,762 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data7/current/BP-1428576717-172.17.0.3-1731710244171 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:37:41,763 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data8/current/BP-1428576717-172.17.0.3-1731710244171 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:37:41,763 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:37:41,840 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:42,683 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:43,688 WARN [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]] 2024-11-15T22:37:43,688 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:43,689 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e611192d6313%2C44569%2C1731710246574:(num 1731710259671) roll requested 2024-11-15T22:37:43,689 INFO [regionserver/e611192d6313:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C44569%2C1731710246574.1731710263689 2024-11-15T22:37:43,694 WARN [Thread-902 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:43,694 WARN [Thread-902 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK], DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK]) is bad. 
2024-11-15T22:37:43,695 WARN [Thread-902 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741840_1022 2024-11-15T22:37:43,695 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710247224 after 4007ms 2024-11-15T22:37:43,696 WARN [Thread-902 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK] 2024-11-15T22:37:43,700 WARN [Thread-902 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44063 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:43,700 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53656 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data10]'}, localName='127.0.0.1:40635', datanodeUuid='9535c085-55f4-456b-b220-53fa517075d9', xmitsInProgress=0}:Exception transferring block BP-1428576717-172.17.0.3-1731710244171:blk_1073741841_1023 to mirror 127.0.0.1:44063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T22:37:43,701 WARN [Thread-902 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK], DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]) is bad. 2024-11-15T22:37:43,701 WARN [Thread-902 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741841_1023 2024-11-15T22:37:43,701 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53656 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-15T22:37:43,701 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53656 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:40635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53656 dst: /127.0.0.1:40635 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T22:37:43,702 WARN [Thread-902 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK] 2024-11-15T22:37:43,706 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:43,706 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:43,706 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:43,707 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:43,707 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:43,707 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710259671 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710263689 2024-11-15T22:37:43,708 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38631:38631),(127.0.0.1/127.0.0.1:36625:36625)] 2024-11-15T22:37:43,708 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710247224 is not closed yet, will try archiving it next time 2024-11-15T22:37:43,708 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710259671 is not closed yet, will try archiving it next time 2024-11-15T22:37:43,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35127 is added to blk_1073741839_1021 (size=2431) 2024-11-15T22:37:43,771 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T22:37:43,841 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:44,111 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710247224 is not closed yet, will try archiving it next time 2024-11-15T22:37:44,684 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:45,708 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:45,780 WARN [ResponseProcessor for block BP-1428576717-172.17.0.3-1731710244171:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1428576717-172.17.0.3-1731710244171:blk_1073741842_1024 java.io.IOException: Bad response ERROR for BP-1428576717-172.17.0.3-1731710244171:blk_1073741842_1024 from datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:45,780 WARN [DataStreamer for file /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710263689 block BP-1428576717-172.17.0.3-1731710244171:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK], DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 2024-11-15T22:37:45,780 WARN [PacketResponder: BP-1428576717-172.17.0.3-1731710244171:blk_1073741842_1024, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35127] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:45,781 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53668 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:40635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53668 dst: /127.0.0.1:40635 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:45,781 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:54012 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:35127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54012 dst: /127.0.0.1:35127 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:45,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1196c8fc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:37:45,815 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5580c33e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:37:45,815 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:37:45,815 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ed35b1e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:37:45,815 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ca82099{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.log.dir/,STOPPED} 2024-11-15T22:37:45,816 WARN [BP-1428576717-172.17.0.3-1731710244171 heartbeating to localhost/127.0.0.1:41079 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:37:45,816 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T22:37:45,816 WARN [BP-1428576717-172.17.0.3-1731710244171 heartbeating to localhost/127.0.0.1:41079 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1428576717-172.17.0.3-1731710244171 (Datanode Uuid c1b6fc5f-bb2e-4afe-bbcf-2c93c2a62fac) service to localhost/127.0.0.1:41079 2024-11-15T22:37:45,816 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:37:45,817 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data5/current/BP-1428576717-172.17.0.3-1731710244171 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:37:45,817 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data6/current/BP-1428576717-172.17.0.3-1731710244171 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:37:45,817 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:37:45,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44569 {}] regionserver.HRegion(8855): Flush requested on 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:37:45,827 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4876379c8ce36c0ab9ae28b3c1e67fd7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T22:37:45,841 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:45,851 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/498a8a1fa8864f35bbad9a6d43eda13f is 1080, key is row0002/info:/1731710261765/Put/seqid=0 2024-11-15T22:37:45,853 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:45,853 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK], DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 2024-11-15T22:37:45,853 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741843_1026 2024-11-15T22:37:45,854 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] 2024-11-15T22:37:45,855 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:45,855 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK], DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]) is bad. 2024-11-15T22:37:45,855 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741844_1027 2024-11-15T22:37:45,856 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK] 2024-11-15T22:37:45,857 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:45,857 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK], DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]) is bad. 2024-11-15T22:37:45,857 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741845_1028 2024-11-15T22:37:45,858 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK] 2024-11-15T22:37:45,859 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:45,859 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK], DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK]) is bad. 
2024-11-15T22:37:45,859 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741846_1029 2024-11-15T22:37:45,859 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK] 2024-11-15T22:37:45,860 WARN [IPC Server handler 1 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T22:37:45,860 WARN [IPC Server handler 1 on default port 41079 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T22:37:45,860 WARN [IPC Server handler 1 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T22:37:45,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741847_1030 (size=10347) 2024-11-15T22:37:46,264 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/498a8a1fa8864f35bbad9a6d43eda13f 2024-11-15T22:37:46,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/498a8a1fa8864f35bbad9a6d43eda13f as hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/498a8a1fa8864f35bbad9a6d43eda13f 2024-11-15T22:37:46,279 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/498a8a1fa8864f35bbad9a6d43eda13f, entries=5, sequenceid=11, filesize=10.1 K 2024-11-15T22:37:46,280 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 4876379c8ce36c0ab9ae28b3c1e67fd7 in 453ms, sequenceid=11, compaction requested=false 2024-11-15T22:37:46,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
4876379c8ce36c0ab9ae28b3c1e67fd7: 2024-11-15T22:37:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44569 {}] regionserver.HRegion(8855): Flush requested on 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:37:46,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4876379c8ce36c0ab9ae28b3c1e67fd7 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-15T22:37:46,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/8485b65fe9364ce3990ce5282b82d942 is 1080, key is row0007/info:/1731710265828/Put/seqid=0 2024-11-15T22:37:46,464 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44063 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:46,464 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53706 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data10]'}, localName='127.0.0.1:40635', datanodeUuid='9535c085-55f4-456b-b220-53fa517075d9', xmitsInProgress=0}:Exception transferring block BP-1428576717-172.17.0.3-1731710244171:blk_1073741848_1031 to mirror 127.0.0.1:44063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T22:37:46,464 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK], DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]) is bad. 2024-11-15T22:37:46,464 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741848_1031 2024-11-15T22:37:46,464 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53706 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T22:37:46,464 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53706 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:40635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53706 dst: /127.0.0.1:40635 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:46,465 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK] 2024-11-15T22:37:46,466 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:37:46,466 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK], DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 2024-11-15T22:37:46,466 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741849_1032 2024-11-15T22:37:46,467 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] 2024-11-15T22:37:46,468 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:46,468 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK], DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK]) is bad. 2024-11-15T22:37:46,468 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741850_1033 2024-11-15T22:37:46,469 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK] 2024-11-15T22:37:46,471 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33791 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:37:46,470 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53716 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data10]'}, localName='127.0.0.1:40635', datanodeUuid='9535c085-55f4-456b-b220-53fa517075d9', xmitsInProgress=0}:Exception transferring block BP-1428576717-172.17.0.3-1731710244171:blk_1073741851_1034 to mirror 127.0.0.1:33791 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:46,471 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK], DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]) is bad. 2024-11-15T22:37:46,471 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741851_1034 2024-11-15T22:37:46,471 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53716 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T22:37:46,471 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53716 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:40635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53716 dst: /127.0.0.1:40635 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:46,471 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK] 2024-11-15T22:37:46,472 WARN [IPC Server handler 1 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T22:37:46,472 WARN [IPC Server handler 1 on default port 41079 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T22:37:46,472 WARN [IPC Server handler 1 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T22:37:46,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741852_1035 (size=12506) 2024-11-15T22:37:46,637 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@155a9bf4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40635, datanodeUuid=9535c085-55f4-456b-b220-53fa517075d9, infoPort=38631, infoSecurePort=0, ipcPort=37191, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171):Failed to transfer BP-1428576717-172.17.0.3-1731710244171:blk_1073741847_1030 to 127.0.0.1:35127 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:46,684 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:46,876 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/8485b65fe9364ce3990ce5282b82d942 2024-11-15T22:37:46,885 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/8485b65fe9364ce3990ce5282b82d942 as hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/8485b65fe9364ce3990ce5282b82d942 2024-11-15T22:37:46,893 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/8485b65fe9364ce3990ce5282b82d942, entries=7, sequenceid=24, filesize=12.2 K 2024-11-15T22:37:46,894 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 4876379c8ce36c0ab9ae28b3c1e67fd7 in 439ms, sequenceid=24, compaction requested=false 2024-11-15T22:37:46,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4876379c8ce36c0ab9ae28b3c1e67fd7: 2024-11-15T22:37:46,895 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-15T22:37:46,895 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:37:46,895 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/8485b65fe9364ce3990ce5282b82d942 because midkey is the same as first or last row 2024-11-15T22:37:47,709 WARN [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. 
Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK]] 2024-11-15T22:37:47,709 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:47,709 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e611192d6313%2C44569%2C1731710246574:(num 1731710263689) roll requested 2024-11-15T22:37:47,710 INFO [regionserver/e611192d6313:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C44569%2C1731710246574.1731710267709 2024-11-15T22:37:47,713 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:47,713 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK], DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK]) is bad. 2024-11-15T22:37:47,713 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741853_1036 2024-11-15T22:37:47,714 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK] 2024-11-15T22:37:47,716 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:47,717 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK], DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 2024-11-15T22:37:47,717 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741854_1037 2024-11-15T22:37:47,718 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] 2024-11-15T22:37:47,721 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33791 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:47,721 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53732 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data10]'}, localName='127.0.0.1:40635', datanodeUuid='9535c085-55f4-456b-b220-53fa517075d9', xmitsInProgress=0}:Exception transferring block BP-1428576717-172.17.0.3-1731710244171:blk_1073741855_1038 to mirror 127.0.0.1:33791 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:47,721 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK], DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]) is bad. 2024-11-15T22:37:47,721 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741855_1038 2024-11-15T22:37:47,721 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53732 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-15T22:37:47,721 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53732 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:40635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53732 dst: /127.0.0.1:40635 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:47,722 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK] 2024-11-15T22:37:47,724 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44063 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:47,724 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53736 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data10]'}, localName='127.0.0.1:40635', datanodeUuid='9535c085-55f4-456b-b220-53fa517075d9', xmitsInProgress=0}:Exception transferring block BP-1428576717-172.17.0.3-1731710244171:blk_1073741856_1039 to mirror 127.0.0.1:44063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:47,724 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK], DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]) is bad. 2024-11-15T22:37:47,724 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741856_1039 2024-11-15T22:37:47,724 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53736 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
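The DataXceiver errors above are the datanode-side view of the same failure: 127.0.0.1:40635 accepts the block but cannot reach its mirror, so it reports firstBadLink and the client abandons the block. The client code path being exercised is just an ordinary create-and-flush; a minimal sketch of that path follows, with a hypothetical target path and payload (only the NameNode port and the replication target of 2 come from the log).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import java.net.URI;
    import java.nio.charset.StandardCharsets;

    public class PipelineWriteSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.setInt("dfs.replication", 2); // the placement warnings show a target of 2 replicas
            try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41079"), conf);
                 FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-write-sketch"))) {
                out.write("row0002".getBytes(StandardCharsets.UTF_8));
                // hflush() pushes the data through the datanode pipeline; with dead mirrors this
                // is where DataStreamer surfaces the ConnectException seen above.
                out.hflush();
            }
        }
    }
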
2024-11-15T22:37:47,724 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53736 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:40635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53736 dst: /127.0.0.1:40635 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:47,725 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK] 2024-11-15T22:37:47,726 WARN [IPC Server handler 4 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T22:37:47,726 WARN [IPC Server handler 4 on default port 41079 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T22:37:47,726 WARN [IPC Server handler 4 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T22:37:47,729 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:47,729 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:47,729 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:47,729 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:47,729 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:47,729 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL 
/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710263689 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710267709 2024-11-15T22:37:47,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741842_1025 (size=25992) 2024-11-15T22:37:47,732 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38631:38631)] 2024-11-15T22:37:47,732 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710247224 is not closed yet, will try archiving it next time 2024-11-15T22:37:47,732 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710263689 is not closed yet, will try archiving it next time 2024-11-15T22:37:47,733 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710259671 to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/oldWALs/e611192d6313%2C44569%2C1731710246574.1731710259671 2024-11-15T22:37:47,841 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
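The roll above succeeded onto a pipeline containing only the surviving datanode. Here the roll was requested automatically by the pipeline-error check, but the same operation is available through the HBase Admin API; the sketch below is illustrative, reusing the regionserver name from the log but otherwise assuming default client configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // ServerName is host,port,startcode; this matches the regionserver in the log.
                ServerName rs = ServerName.valueOf("e611192d6313,44569,1731710246574");
                admin.rollWALWriter(rs); // ask that regionserver to roll its WAL now
            }
        }
    }
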
2024-11-15T22:37:47,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44569 {}] regionserver.HRegion(8855): Flush requested on 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:37:47,880 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4876379c8ce36c0ab9ae28b3c1e67fd7 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-15T22:37:47,889 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/0c1ce5c069034113979cfaa51b16985b is 1079, key is tmprow/info:/1731710267877/Put/seqid=0 2024-11-15T22:37:47,892 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44063 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:47,892 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53754 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data10]'}, localName='127.0.0.1:40635', datanodeUuid='9535c085-55f4-456b-b220-53fa517075d9', xmitsInProgress=0}:Exception transferring block BP-1428576717-172.17.0.3-1731710244171:blk_1073741858_1041 to mirror 127.0.0.1:44063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
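The MemStoreFlusher entries at the start of this stretch show a flush being requested on region 4876379c8ce36c0ab9ae28b3c1e67fd7 and a new HFile being prepared under .tmp. The same flush can be forced from a client, which is roughly how log-rolling tests accumulate store files quickly; a small sketch follows, assuming only the table name that appears in the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Write the table's memstores out as new HFiles under .tmp and commit them into
                // the info family, mirroring the MemStoreFlusher steps recorded above.
                admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
            }
        }
    }
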
2024-11-15T22:37:47,892 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK], DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]) is bad. 2024-11-15T22:37:47,892 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741858_1041 2024-11-15T22:37:47,892 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53754 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T22:37:47,892 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53754 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:40635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53754 dst: /127.0.0.1:40635 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:47,893 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK] 2024-11-15T22:37:47,894 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:37:47,894 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK], DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK]) is bad. 2024-11-15T22:37:47,894 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741859_1042 2024-11-15T22:37:47,895 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK] 2024-11-15T22:37:47,897 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35127 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:47,897 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53764 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data10]'}, localName='127.0.0.1:40635', datanodeUuid='9535c085-55f4-456b-b220-53fa517075d9', xmitsInProgress=0}:Exception transferring block BP-1428576717-172.17.0.3-1731710244171:blk_1073741860_1043 to mirror 127.0.0.1:35127 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T22:37:47,897 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK], DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 2024-11-15T22:37:47,897 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741860_1043 2024-11-15T22:37:47,897 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53764 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T22:37:47,897 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53764 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:40635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53764 dst: /127.0.0.1:40635 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:47,898 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] 2024-11-15T22:37:47,900 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33791 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
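By this point every pipeline that includes 127.0.0.1:41737, 127.0.0.1:35127, 127.0.0.1:44063 or 127.0.0.1:33791 fails, and only 127.0.0.1:40635 keeps accepting blocks. When untangling this kind of cascade it helps to ask the NameNode which datanodes it still considers live; a sketch using the DistributedFileSystem API is below (the filesystem URI matches the log, the rest is illustrative and assumes the datanode-report call is available as shown).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
    import java.net.URI;

    public class LiveDatanodesSketch {
        public static void main(String[] args) throws Exception {
            try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41079"), new Configuration())) {
                DistributedFileSystem dfs = (DistributedFileSystem) fs;
                // Report only the datanodes the NameNode currently considers LIVE.
                for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
                    System.out.println(dn.getXferAddr() + " remaining=" + dn.getRemaining());
                }
            }
        }
    }
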
2024-11-15T22:37:47,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53766 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data10]'}, localName='127.0.0.1:40635', datanodeUuid='9535c085-55f4-456b-b220-53fa517075d9', xmitsInProgress=0}:Exception transferring block BP-1428576717-172.17.0.3-1731710244171:blk_1073741861_1044 to mirror 127.0.0.1:33791 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:47,900 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK], DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]) is bad. 2024-11-15T22:37:47,900 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741861_1044 2024-11-15T22:37:47,900 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53766 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T22:37:47,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53766 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:40635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53766 dst: /127.0.0.1:40635 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:47,901 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK] 2024-11-15T22:37:47,901 WARN [IPC Server handler 3 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T22:37:47,901 WARN [IPC Server handler 3 on default port 41079 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T22:37:47,901 WARN [IPC Server handler 3 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T22:37:47,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741862_1045 (size=6027) 2024-11-15T22:37:48,133 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710247224 is not closed yet, will try archiving it next time 2024-11-15T22:37:48,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/0c1ce5c069034113979cfaa51b16985b 2024-11-15T22:37:48,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/0c1ce5c069034113979cfaa51b16985b as 
hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/0c1ce5c069034113979cfaa51b16985b 2024-11-15T22:37:48,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/0c1ce5c069034113979cfaa51b16985b, entries=1, sequenceid=34, filesize=5.9 K 2024-11-15T22:37:48,326 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 4876379c8ce36c0ab9ae28b3c1e67fd7 in 446ms, sequenceid=34, compaction requested=true 2024-11-15T22:37:48,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4876379c8ce36c0ab9ae28b3c1e67fd7: 2024-11-15T22:37:48,326 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-15T22:37:48,326 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:37:48,326 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/8485b65fe9364ce3990ce5282b82d942 because midkey is the same as first or last row 2024-11-15T22:37:48,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4876379c8ce36c0ab9ae28b3c1e67fd7:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T22:37:48,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:37:48,326 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T22:37:48,328 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T22:37:48,328 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.HStore(1541): 4876379c8ce36c0ab9ae28b3c1e67fd7/info is initiating minor compaction (all files) 2024-11-15T22:37:48,328 INFO [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4876379c8ce36c0ab9ae28b3c1e67fd7/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. 
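The split-policy checks above fire because the store's files (sumSize 28.2 K) already exceed the 16 K sizeToCheck used by this test, yet the store cannot actually be split while the midkey of the dominant file equals its first or last row. The thresholds involved are ordinary table-level settings; the sketch below shows how a table with similarly tiny limits could be declared. The values only echo the sizes in the log and are not taken from the test's actual table descriptor.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SplitPolicySketch {
        static TableDescriptor smallSplitTable() {
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                // Force very early split checks, in the spirit of the 16 K sizeToCheck above.
                .setMaxFileSize(16 * 1024L)
                // Same policy class named in the log, applied as a per-table override.
                .setRegionSplitPolicyClassName(
                    "org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy")
                .build();
        }
    }
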
2024-11-15T22:37:48,328 INFO [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/498a8a1fa8864f35bbad9a6d43eda13f, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/8485b65fe9364ce3990ce5282b82d942, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/0c1ce5c069034113979cfaa51b16985b] into tmpdir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp, totalSize=28.2 K 2024-11-15T22:37:48,328 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] compactions.Compactor(225): Compacting 498a8a1fa8864f35bbad9a6d43eda13f, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731710261765 2024-11-15T22:37:48,329 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8485b65fe9364ce3990ce5282b82d942, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731710265828 2024-11-15T22:37:48,329 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0c1ce5c069034113979cfaa51b16985b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731710267877 2024-11-15T22:37:48,341 INFO [RS:0;e611192d6313:44569-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4876379c8ce36c0ab9ae28b3c1e67fd7#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T22:37:48,341 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/ead7aca831e4431a8a56cd7ecef6030a is 1080, key is row0002/info:/1731710261765/Put/seqid=0 2024-11-15T22:37:48,343 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
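The compaction entries above show a minor compaction being selected as soon as three store files are eligible (three is the usual minimum, hbase.hstore.compaction.min, historically hbase.hstore.compactionThreshold), and the compactor's own .tmp write then running into the same dead-datanode pipeline errors. Compactions can also be requested explicitly; a short sketch, again assuming only the table name from this test:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
                // Queue a compaction of the table's stores; it runs asynchronously on the regionserver.
                admin.compact(table);
                // admin.majorCompact(table) would instead rewrite each store down to a single file.
            }
        }
    }
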
2024-11-15T22:37:48,343 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK], DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK]) is bad. 2024-11-15T22:37:48,343 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741863_1046 2024-11-15T22:37:48,344 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK] 2024-11-15T22:37:48,345 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:48,345 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK], DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]) is bad. 2024-11-15T22:37:48,345 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741864_1047 2024-11-15T22:37:48,346 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK] 2024-11-15T22:37:48,347 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:48,347 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK], DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]) is bad. 2024-11-15T22:37:48,347 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741865_1048 2024-11-15T22:37:48,347 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK] 2024-11-15T22:37:48,348 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:48,349 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK], DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 
2024-11-15T22:37:48,349 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741866_1049 2024-11-15T22:37:48,349 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] 2024-11-15T22:37:48,350 WARN [IPC Server handler 1 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T22:37:48,350 WARN [IPC Server handler 1 on default port 41079 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T22:37:48,350 WARN [IPC Server handler 1 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T22:37:48,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741867_1050 (size=17994) 2024-11-15T22:37:48,685 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:37:48,770 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/ead7aca831e4431a8a56cd7ecef6030a as hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/ead7aca831e4431a8a56cd7ecef6030a 2024-11-15T22:37:48,777 INFO [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4876379c8ce36c0ab9ae28b3c1e67fd7/info of 4876379c8ce36c0ab9ae28b3c1e67fd7 into ead7aca831e4431a8a56cd7ecef6030a(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T22:37:48,777 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4876379c8ce36c0ab9ae28b3c1e67fd7: 2024-11-15T22:37:48,777 INFO [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7., storeName=4876379c8ce36c0ab9ae28b3c1e67fd7/info, priority=13, startTime=1731710268326; duration=0sec 2024-11-15T22:37:48,778 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-15T22:37:48,778 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:37:48,778 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/ead7aca831e4431a8a56cd7ecef6030a because midkey is the same as first or last row 2024-11-15T22:37:48,778 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-15T22:37:48,778 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:37:48,778 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/ead7aca831e4431a8a56cd7ecef6030a because midkey is the same as first or last row 2024-11-15T22:37:48,778 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-15T22:37:48,778 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:37:48,778 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/ead7aca831e4431a8a56cd7ecef6030a because midkey is the same as first or last row 2024-11-15T22:37:48,778 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:37:48,778 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4876379c8ce36c0ab9ae28b3c1e67fd7:info 2024-11-15T22:37:49,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44569 {}] regionserver.HRegion(8855): Flush requested on 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:37:49,317 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4876379c8ce36c0ab9ae28b3c1e67fd7 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-15T22:37:49,325 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/b7fbc31b9d7c40e3b7af0df6916a8000 is 1079, key is tmprow/info:/1731710269313/Put/seqid=0 2024-11-15T22:37:49,328 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33791 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:49,328 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53808 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data10]'}, localName='127.0.0.1:40635', datanodeUuid='9535c085-55f4-456b-b220-53fa517075d9', xmitsInProgress=0}:Exception transferring block BP-1428576717-172.17.0.3-1731710244171:blk_1073741868_1051 to mirror 127.0.0.1:33791 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:49,328 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK], DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]) is bad. 2024-11-15T22:37:49,328 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53808 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T22:37:49,328 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741868_1051 2024-11-15T22:37:49,328 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:53808 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:40635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53808 dst: /127.0.0.1:40635 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:49,329 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK] 2024-11-15T22:37:49,331 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:49,331 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK], DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]) is bad. 2024-11-15T22:37:49,331 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741869_1052 2024-11-15T22:37:49,332 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK] 2024-11-15T22:37:49,333 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:49,333 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK], DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK]) is bad. 2024-11-15T22:37:49,334 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741870_1053 2024-11-15T22:37:49,334 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK] 2024-11-15T22:37:49,336 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:49,336 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK], DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 2024-11-15T22:37:49,337 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741871_1054 2024-11-15T22:37:49,337 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] 2024-11-15T22:37:49,338 WARN [IPC Server handler 0 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T22:37:49,338 WARN [IPC Server handler 0 on default port 41079 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T22:37:49,338 WARN [IPC Server handler 0 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T22:37:49,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741872_1055 (size=6027) 2024-11-15T22:37:49,627 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@155a9bf4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40635, datanodeUuid=9535c085-55f4-456b-b220-53fa517075d9, infoPort=38631, infoSecurePort=0, ipcPort=37191, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171):Failed to transfer BP-1428576717-172.17.0.3-1731710244171:blk_1073741852_1035 to 127.0.0.1:35127 got java.net.ConnectException: Connection refused at 
sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:49,628 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1c8c18ff[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40635, datanodeUuid=9535c085-55f4-456b-b220-53fa517075d9, infoPort=38631, infoSecurePort=0, ipcPort=37191, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171):Failed to transfer BP-1428576717-172.17.0.3-1731710244171:blk_1073741842_1025 to 127.0.0.1:44063 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:49,733 WARN [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK]] 2024-11-15T22:37:49,733 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:37:49,733 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e611192d6313%2C44569%2C1731710246574:(num 1731710267709) roll requested 2024-11-15T22:37:49,734 INFO [regionserver/e611192d6313:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C44569%2C1731710246574.1731710269734 2024-11-15T22:37:49,740 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:49,740 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK], DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK]) is bad. 2024-11-15T22:37:49,740 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741873_1056 2024-11-15T22:37:49,741 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK] 2024-11-15T22:37:49,743 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/b7fbc31b9d7c40e3b7af0df6916a8000 2024-11-15T22:37:49,744 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:37:49,744 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK], DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 2024-11-15T22:37:49,744 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741874_1057 2024-11-15T22:37:49,745 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] 2024-11-15T22:37:49,747 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:49,747 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK], DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]) is bad. 2024-11-15T22:37:49,747 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741875_1058 2024-11-15T22:37:49,748 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK] 2024-11-15T22:37:49,750 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:49,750 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK], DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]) is bad. 2024-11-15T22:37:49,750 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741876_1059 2024-11-15T22:37:49,751 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK] 2024-11-15T22:37:49,752 WARN [IPC Server handler 3 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T22:37:49,752 WARN [IPC Server handler 3 on default port 41079 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T22:37:49,752 WARN [IPC Server handler 3 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T22:37:49,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/b7fbc31b9d7c40e3b7af0df6916a8000 as hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/b7fbc31b9d7c40e3b7af0df6916a8000 2024-11-15T22:37:49,755 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:49,755 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:49,755 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:49,756 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:49,756 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:49,756 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710267709 with entries=14, filesize=12.92 KB; new WAL 
/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710269734 2024-11-15T22:37:49,757 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38631:38631)] 2024-11-15T22:37:49,757 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710247224 is not closed yet, will try archiving it next time 2024-11-15T22:37:49,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741857_1040 (size=13234) 2024-11-15T22:37:49,757 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710267709 is not closed yet, will try archiving it next time 2024-11-15T22:37:49,757 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710263689 to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/oldWALs/e611192d6313%2C44569%2C1731710246574.1731710263689 2024-11-15T22:37:49,758 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710247224 is not closed yet, will try archiving it next time 2024-11-15T22:37:49,761 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/b7fbc31b9d7c40e3b7af0df6916a8000, entries=1, sequenceid=45, filesize=5.9 K 2024-11-15T22:37:49,762 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 4876379c8ce36c0ab9ae28b3c1e67fd7 in 446ms, sequenceid=45, compaction requested=false 2024-11-15T22:37:49,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4876379c8ce36c0ab9ae28b3c1e67fd7: 2024-11-15T22:37:49,762 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-15T22:37:49,762 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:37:49,762 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/ead7aca831e4431a8a56cd7ecef6030a because midkey is the same as first or last row 2024-11-15T22:37:49,842 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:50,626 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1c8c18ff[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40635, datanodeUuid=9535c085-55f4-456b-b220-53fa517075d9, infoPort=38631, infoSecurePort=0, ipcPort=37191, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171):Failed to transfer BP-1428576717-172.17.0.3-1731710244171:blk_1073741862_1045 to 127.0.0.1:41737 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:50,626 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@155a9bf4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40635, datanodeUuid=9535c085-55f4-456b-b220-53fa517075d9, infoPort=38631, infoSecurePort=0, ipcPort=37191, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171):Failed to transfer BP-1428576717-172.17.0.3-1731710244171:blk_1073741867_1050 to 127.0.0.1:33791 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:50,685 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:50,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44569 {}] regionserver.HRegion(8855): Flush requested on 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:37:50,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4876379c8ce36c0ab9ae28b3c1e67fd7 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-15T22:37:50,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/ccde1d411eb145ab930458c373d1dcf3 is 1079, key is tmprow/info:/1731710270756/Put/seqid=0 2024-11-15T22:37:50,767 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:50,768 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK], DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]) is bad. 2024-11-15T22:37:50,768 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741878_1061 2024-11-15T22:37:50,768 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK] 2024-11-15T22:37:50,770 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:50,770 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK], DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 2024-11-15T22:37:50,770 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741879_1062 2024-11-15T22:37:50,770 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] 2024-11-15T22:37:50,772 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:50,772 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK], DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]) is bad. 2024-11-15T22:37:50,772 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741880_1063 2024-11-15T22:37:50,773 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK] 2024-11-15T22:37:50,774 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:50,774 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK], DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK]) is bad. 2024-11-15T22:37:50,774 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741881_1064 2024-11-15T22:37:50,775 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK] 2024-11-15T22:37:50,775 WARN [IPC Server handler 3 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T22:37:50,775 WARN [IPC Server handler 3 on default port 41079 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T22:37:50,776 WARN [IPC Server handler 3 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T22:37:50,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741882_1065 (size=6027) 2024-11-15T22:37:51,181 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/ccde1d411eb145ab930458c373d1dcf3 2024-11-15T22:37:51,195 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/ccde1d411eb145ab930458c373d1dcf3 as hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/ccde1d411eb145ab930458c373d1dcf3 2024-11-15T22:37:51,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/ccde1d411eb145ab930458c373d1dcf3, entries=1, sequenceid=55, filesize=5.9 K 2024-11-15T22:37:51,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 4876379c8ce36c0ab9ae28b3c1e67fd7 in 444ms, sequenceid=55, compaction requested=true 2024-11-15T22:37:51,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4876379c8ce36c0ab9ae28b3c1e67fd7: 2024-11-15T22:37:51,203 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-15T22:37:51,203 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:37:51,203 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/ead7aca831e4431a8a56cd7ecef6030a because midkey is the same as first or last row 2024-11-15T22:37:51,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4876379c8ce36c0ab9ae28b3c1e67fd7:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T22:37:51,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:37:51,203 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T22:37:51,204 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T22:37:51,204 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.HStore(1541): 4876379c8ce36c0ab9ae28b3c1e67fd7/info is initiating minor compaction (all files) 2024-11-15T22:37:51,204 INFO [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4876379c8ce36c0ab9ae28b3c1e67fd7/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. 
2024-11-15T22:37:51,205 INFO [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/ead7aca831e4431a8a56cd7ecef6030a, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/b7fbc31b9d7c40e3b7af0df6916a8000, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/ccde1d411eb145ab930458c373d1dcf3] into tmpdir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp, totalSize=29.3 K 2024-11-15T22:37:51,205 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] compactions.Compactor(225): Compacting ead7aca831e4431a8a56cd7ecef6030a, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731710261765 2024-11-15T22:37:51,206 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] compactions.Compactor(225): Compacting b7fbc31b9d7c40e3b7af0df6916a8000, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731710269313 2024-11-15T22:37:51,206 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] compactions.Compactor(225): Compacting ccde1d411eb145ab930458c373d1dcf3, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731710270756 2024-11-15T22:37:51,223 INFO [RS:0;e611192d6313:44569-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4876379c8ce36c0ab9ae28b3c1e67fd7#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T22:37:51,223 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/af2ce8e8276a425ab10df6de1714334e is 1080, key is row0002/info:/1731710261765/Put/seqid=0 2024-11-15T22:37:51,225 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:37:51,225 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK], DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK]) is bad. 2024-11-15T22:37:51,225 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741883_1066 2024-11-15T22:37:51,226 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK] 2024-11-15T22:37:51,227 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:51,227 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK], DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]) is bad. 2024-11-15T22:37:51,227 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741884_1067 2024-11-15T22:37:51,227 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK] 2024-11-15T22:37:51,228 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:51,228 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK], DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]) is bad. 2024-11-15T22:37:51,228 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741885_1068 2024-11-15T22:37:51,229 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44063,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK] 2024-11-15T22:37:51,230 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:51,230 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK], DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 
2024-11-15T22:37:51,230 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741886_1069 2024-11-15T22:37:51,230 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] 2024-11-15T22:37:51,231 WARN [IPC Server handler 3 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T22:37:51,231 WARN [IPC Server handler 3 on default port 41079 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T22:37:51,231 WARN [IPC Server handler 3 on default port 41079 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T22:37:51,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741887_1070 (size=18097) 2024-11-15T22:37:51,651 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/af2ce8e8276a425ab10df6de1714334e as hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/af2ce8e8276a425ab10df6de1714334e 2024-11-15T22:37:51,659 INFO [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4876379c8ce36c0ab9ae28b3c1e67fd7/info of 4876379c8ce36c0ab9ae28b3c1e67fd7 into af2ce8e8276a425ab10df6de1714334e(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-15T22:37:51,660 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4876379c8ce36c0ab9ae28b3c1e67fd7: 2024-11-15T22:37:51,660 INFO [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7., storeName=4876379c8ce36c0ab9ae28b3c1e67fd7/info, priority=13, startTime=1731710271203; duration=0sec 2024-11-15T22:37:51,660 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-15T22:37:51,660 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:37:51,660 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/af2ce8e8276a425ab10df6de1714334e because midkey is the same as first or last row 2024-11-15T22:37:51,660 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-15T22:37:51,660 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:37:51,660 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/af2ce8e8276a425ab10df6de1714334e because midkey is the same as first or last row 2024-11-15T22:37:51,660 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-15T22:37:51,660 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:37:51,660 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/af2ce8e8276a425ab10df6de1714334e because midkey is the same as first or last row 2024-11-15T22:37:51,661 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:37:51,661 DEBUG [RS:0;e611192d6313:44569-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4876379c8ce36c0ab9ae28b3c1e67fd7:info 2024-11-15T22:37:51,758 WARN [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
2024-11-15T22:37:51,758 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:51,793 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:37:51,798 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:37:51,798 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:37:51,799 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:37:51,799 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T22:37:51,799 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@796aacdc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:37:51,799 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3edaa0c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:37:51,842 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:37:51,893 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30d4c51a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/java.io.tmpdir/jetty-localhost-36103-hadoop-hdfs-3_4_1-tests_jar-_-any-12416478829718439778/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:37:51,893 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34827478{HTTP/1.1, (http/1.1)}{localhost:36103} 2024-11-15T22:37:51,894 INFO [Time-limited test {}] server.Server(415): Started @130051ms 2024-11-15T22:37:51,895 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:37:52,341 WARN [Thread-977 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T22:37:52,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc892d854173aefdb with lease ID 0xaba062ed3263d2be: from storage DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3 node DatanodeRegistration(127.0.0.1:32921, datanodeUuid=581fc627-72c4-42db-bdf9-451bbec9abcb, infoPort=33327, infoSecurePort=0, ipcPort=37415, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T22:37:52,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc892d854173aefdb with lease ID 0xaba062ed3263d2be: from storage DS-977b2219-6b5d-4a8e-b782-9c30294d9944 node DatanodeRegistration(127.0.0.1:32921, datanodeUuid=581fc627-72c4-42db-bdf9-451bbec9abcb, infoPort=33327, infoSecurePort=0, ipcPort=37415, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:37:52,627 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@155a9bf4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40635, datanodeUuid=9535c085-55f4-456b-b220-53fa517075d9, infoPort=38631, infoSecurePort=0, ipcPort=37191, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171):Failed to transfer BP-1428576717-172.17.0.3-1731710244171:blk_1073741872_1055 to 127.0.0.1:35127 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T22:37:52,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741857_1040 (size=13234) 2024-11-15T22:37:52,686 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:53,626 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@155a9bf4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40635, datanodeUuid=9535c085-55f4-456b-b220-53fa517075d9, infoPort=38631, infoSecurePort=0, ipcPort=37191, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171):Failed to transfer BP-1428576717-172.17.0.3-1731710244171:blk_1073741887_1070 to 127.0.0.1:35127 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:53,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741882_1065 (size=6027) 2024-11-15T22:37:53,758 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:53,843 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:54,686 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:55,759 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:55,843 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:56,378 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T22:37:56,687 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:56,921 ERROR [FSHLog-0-hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData-prefix:e611192d6313,46573,1731710246399 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:56,921 WARN [FSHLog-0-hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData-prefix:e611192d6313,46573,1731710246399 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:56,921 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog e611192d6313%2C46573%2C1731710246399:(num 1731710246704) roll requested 2024-11-15T22:37:56,922 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C46573%2C1731710246399.1731710276922 2024-11-15T22:37:56,929 WARN [Thread-999 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41737 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:56,929 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_827244624_22 at /127.0.0.1:53152 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741888_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data4]'}, localName='127.0.0.1:32921', datanodeUuid='581fc627-72c4-42db-bdf9-451bbec9abcb', xmitsInProgress=0}:Exception transferring block BP-1428576717-172.17.0.3-1731710244171:blk_1073741888_1071 to mirror 127.0.0.1:41737 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:56,930 WARN [Thread-999 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32921,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK], DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK]) is bad. 2024-11-15T22:37:56,930 WARN [Thread-999 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741888_1071 2024-11-15T22:37:56,930 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_827244624_22 at /127.0.0.1:53152 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741888_1071] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-15T22:37:56,930 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_827244624_22 at /127.0.0.1:53152 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741888_1071] {}] datanode.DataXceiver(331): 127.0.0.1:32921:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53152 dst: /127.0.0.1:32921 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:37:56,931 WARN [Thread-999 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41737,DS-426e18f8-86b4-4d9b-a2cc-c8cc655ad224,DISK] 2024-11-15T22:37:56,933 WARN [Thread-999 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:56,934 WARN [Thread-999 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK], DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 2024-11-15T22:37:56,934 WARN [Thread-999 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741889_1072 2024-11-15T22:37:56,935 WARN [Thread-999 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] 2024-11-15T22:37:56,936 WARN [Thread-999 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:56,936 WARN [Thread-999 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741890_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK], DatanodeInfoWithStorage[127.0.0.1:32921,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]) is bad. 2024-11-15T22:37:56,936 WARN [Thread-999 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741890_1073 2024-11-15T22:37:56,937 WARN [Thread-999 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK] 2024-11-15T22:37:56,943 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:56,943 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:56,943 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:56,943 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:56,943 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:37:56,943 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/WALs/e611192d6313,46573,1731710246399/e611192d6313%2C46573%2C1731710246399.1731710246704 with entries=54, filesize=26.65 KB; new WAL /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/WALs/e611192d6313,46573,1731710246399/e611192d6313%2C46573%2C1731710246399.1731710276922 2024-11-15T22:37:56,944 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:56,944 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:37:56,944 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/WALs/e611192d6313,46573,1731710246399/e611192d6313%2C46573%2C1731710246399.1731710246704 2024-11-15T22:37:56,945 WARN [IPC Server handler 2 on default port 41079 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/WALs/e611192d6313,46573,1731710246399/e611192d6313%2C46573%2C1731710246399.1731710246704 has not been closed. Lease recovery is in progress. RecoveryId = 1075 for block blk_1073741830_1006 2024-11-15T22:37:56,945 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/WALs/e611192d6313,46573,1731710246399/e611192d6313%2C46573%2C1731710246399.1731710246704 after 1ms 2024-11-15T22:37:56,946 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38631:38631),(127.0.0.1/127.0.0.1:33327:33327)] 2024-11-15T22:37:56,946 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/WALs/e611192d6313,46573,1731710246399/e611192d6313%2C46573%2C1731710246399.1731710246704 is not closed yet, will try archiving it next time 2024-11-15T22:37:57,760 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:57,844 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:59,761 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:37:59,845 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:00,948 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/WALs/e611192d6313,46573,1731710246399/e611192d6313%2C46573%2C1731710246399.1731710246704 after 4004ms 2024-11-15T22:38:01,761 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:01,845 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:38:02,363 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1f33606e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1428576717-172.17.0.3-1731710244171:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:33791,null,null]) java.net.ConnectException: Call From e611192d6313/172.17.0.3 to localhost:39447 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-15T22:38:02,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741833_1020 (size=455) 2024-11-15T22:38:02,727 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710247224 to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/oldWALs/e611192d6313%2C44569%2C1731710246574.1731710247224 2024-11-15T22:38:02,731 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710267709 to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/oldWALs/e611192d6313%2C44569%2C1731710246574.1731710267709 2024-11-15T22:38:03,762 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:03,846 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:04,350 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7ba8b02a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32921, datanodeUuid=581fc627-72c4-42db-bdf9-451bbec9abcb, infoPort=33327, infoSecurePort=0, ipcPort=37415, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171):Failed to transfer BP-1428576717-172.17.0.3-1731710244171:blk_1073741833_1020 to 127.0.0.1:41737 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:05,519 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C44569%2C1731710246574.1731710285518 2024-11-15T22:38:05,528 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:05,528 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:05,528 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:05,529 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:05,529 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:05,529 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710269734 with entries=13, filesize=11.81 KB; new WAL /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710285518 2024-11-15T22:38:05,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741877_1060 (size=12100) 2024-11-15T22:38:05,530 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33327:33327),(127.0.0.1/127.0.0.1:38631:38631)] 2024-11-15T22:38:05,530 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710269734 is not closed yet, will try archiving it next time 2024-11-15T22:38:05,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44569 {}] regionserver.HRegion(8855): Flush requested on 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:38:05,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4876379c8ce36c0ab9ae28b3c1e67fd7 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-15T22:38:05,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/4f9100590e2742c98a97ed3f2664eb9c is 1080, key is row0013/info:/1731710285531/Put/seqid=0 2024-11-15T22:38:05,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741893_1077 (size=9267) 2024-11-15T22:38:05,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741893_1077 (size=9267) 2024-11-15T22:38:05,548 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/4f9100590e2742c98a97ed3f2664eb9c 2024-11-15T22:38:05,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/4f9100590e2742c98a97ed3f2664eb9c as hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/4f9100590e2742c98a97ed3f2664eb9c 2024-11-15T22:38:05,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/4f9100590e2742c98a97ed3f2664eb9c, entries=4, sequenceid=66, filesize=9.0 K 2024-11-15T22:38:05,564 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7528, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8607 for 4876379c8ce36c0ab9ae28b3c1e67fd7 in 27ms, sequenceid=66, compaction requested=false 2024-11-15T22:38:05,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4876379c8ce36c0ab9ae28b3c1e67fd7: 2024-11-15T22:38:05,564 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=26.7 K, sizeToCheck=16.0 K 2024-11-15T22:38:05,564 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:38:05,564 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/af2ce8e8276a425ab10df6de1714334e because midkey is the same as first or last row 2024-11-15T22:38:05,763 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-15T22:38:05,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44569 {}] regionserver.HRegion(8855): Flush requested on 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:38:05,763 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:38:05,763 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4876379c8ce36c0ab9ae28b3c1e67fd7 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-15T22:38:05,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/5f94a6c67e6e43caa9e16877e15f2128 is 1080, key is row0016/info:/1731710285538/Put/seqid=0 2024-11-15T22:38:05,777 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35127 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:05,777 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:35738 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741894_1078] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data10]'}, localName='127.0.0.1:40635', datanodeUuid='9535c085-55f4-456b-b220-53fa517075d9', xmitsInProgress=0}:Exception transferring block BP-1428576717-172.17.0.3-1731710244171:blk_1073741894_1078 to mirror 127.0.0.1:35127 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T22:38:05,777 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK], DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 2024-11-15T22:38:05,777 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:35738 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741894_1078] {}] datanode.BlockReceiver(316): Block 1073741894 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T22:38:05,777 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741894_1078 2024-11-15T22:38:05,777 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:35738 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741894_1078] {}] datanode.DataXceiver(331): 127.0.0.1:40635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35738 dst: /127.0.0.1:40635 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T22:38:05,778 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] 2024-11-15T22:38:05,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741895_1079 (size=13583) 2024-11-15T22:38:05,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741895_1079 (size=13583) 2024-11-15T22:38:05,782 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/5f94a6c67e6e43caa9e16877e15f2128 2024-11-15T22:38:05,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/5f94a6c67e6e43caa9e16877e15f2128 as hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/5f94a6c67e6e43caa9e16877e15f2128 2024-11-15T22:38:05,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/5f94a6c67e6e43caa9e16877e15f2128, entries=8, sequenceid=78, filesize=13.3 K 2024-11-15T22:38:05,796 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9682, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 4876379c8ce36c0ab9ae28b3c1e67fd7 in 33ms, sequenceid=78, compaction requested=true 2024-11-15T22:38:05,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4876379c8ce36c0ab9ae28b3c1e67fd7: 2024-11-15T22:38:05,796 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K 2024-11-15T22:38:05,796 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:38:05,796 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/af2ce8e8276a425ab10df6de1714334e because midkey is the same as first or last row 2024-11-15T22:38:05,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4876379c8ce36c0ab9ae28b3c1e67fd7:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T22:38:05,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:38:05,797 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T22:38:05,798 DEBUG 
[RS:0;e611192d6313:44569-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T22:38:05,798 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.HStore(1541): 4876379c8ce36c0ab9ae28b3c1e67fd7/info is initiating minor compaction (all files) 2024-11-15T22:38:05,798 INFO [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4876379c8ce36c0ab9ae28b3c1e67fd7/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. 2024-11-15T22:38:05,798 INFO [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/af2ce8e8276a425ab10df6de1714334e, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/4f9100590e2742c98a97ed3f2664eb9c, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/5f94a6c67e6e43caa9e16877e15f2128] into tmpdir=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp, totalSize=40.0 K 2024-11-15T22:38:05,798 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] compactions.Compactor(225): Compacting af2ce8e8276a425ab10df6de1714334e, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731710261765 2024-11-15T22:38:05,799 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] compactions.Compactor(225): Compacting 4f9100590e2742c98a97ed3f2664eb9c, keycount=4, bloomtype=ROW, size=9.0 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1731710271576 2024-11-15T22:38:05,799 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] compactions.Compactor(225): Compacting 5f94a6c67e6e43caa9e16877e15f2128, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1731710285538 2024-11-15T22:38:05,810 INFO [RS:0;e611192d6313:44569-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4876379c8ce36c0ab9ae28b3c1e67fd7#info#compaction#27 average throughput is 22.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T22:38:05,811 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/3c99199ab7aa4828b6058112f890868f is 1080, key is row0002/info:/1731710261765/Put/seqid=0 2024-11-15T22:38:05,813 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1080 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35127 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:05,813 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:35786 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741896_1080] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data10]'}, localName='127.0.0.1:40635', datanodeUuid='9535c085-55f4-456b-b220-53fa517075d9', xmitsInProgress=0}:Exception transferring block BP-1428576717-172.17.0.3-1731710244171:blk_1073741896_1080 to mirror 127.0.0.1:35127 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:05,813 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741896_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK], DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 2024-11-15T22:38:05,813 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741896_1080 2024-11-15T22:38:05,813 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:35786 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741896_1080] {}] datanode.BlockReceiver(316): Block 1073741896 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-15T22:38:05,814 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:35786 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741896_1080] {}] datanode.DataXceiver(331): 127.0.0.1:40635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35786 dst: /127.0.0.1:40635 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:05,814 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] 2024-11-15T22:38:05,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741897_1081 (size=28989) 2024-11-15T22:38:05,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741897_1081 (size=28989) 2024-11-15T22:38:05,825 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/.tmp/info/3c99199ab7aa4828b6058112f890868f as hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/3c99199ab7aa4828b6058112f890868f 2024-11-15T22:38:05,832 INFO [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4876379c8ce36c0ab9ae28b3c1e67fd7/info of 4876379c8ce36c0ab9ae28b3c1e67fd7 into 3c99199ab7aa4828b6058112f890868f(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-15T22:38:05,832 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4876379c8ce36c0ab9ae28b3c1e67fd7: 2024-11-15T22:38:05,833 INFO [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7., storeName=4876379c8ce36c0ab9ae28b3c1e67fd7/info, priority=13, startTime=1731710285796; duration=0sec 2024-11-15T22:38:05,833 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-15T22:38:05,833 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:38:05,833 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/3c99199ab7aa4828b6058112f890868f because midkey is the same as first or last row 2024-11-15T22:38:05,833 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-15T22:38:05,833 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:38:05,833 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/3c99199ab7aa4828b6058112f890868f because midkey is the same as first or last row 2024-11-15T22:38:05,833 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-15T22:38:05,833 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:38:05,833 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/3c99199ab7aa4828b6058112f890868f because midkey is the same as first or last row 2024-11-15T22:38:05,833 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:38:05,833 DEBUG [RS:0;e611192d6313:44569-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4876379c8ce36c0ab9ae28b3c1e67fd7:info 2024-11-15T22:38:05,846 INFO [regionserver/e611192d6313:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:05,932 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.1731710269734 to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/oldWALs/e611192d6313%2C44569%2C1731710246574.1731710269734 2024-11-15T22:38:05,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T22:38:05,964 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T22:38:05,964 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:38:05,964 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:38:05,964 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:38:05,964 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T22:38:05,965 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T22:38:05,965 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=900664206, stopped=false 2024-11-15T22:38:05,965 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e611192d6313,46573,1731710246399 2024-11-15T22:38:06,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T22:38:06,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T22:38:06,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45181-0x10140a559990002, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T22:38:06,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:06,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:06,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45181-0x10140a559990002, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:06,036 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T22:38:06,036 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T22:38:06,037 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:38:06,037 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:38:06,037 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:38:06,037 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e611192d6313,44569,1731710246574' ***** 2024-11-15T22:38:06,037 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T22:38:06,037 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e611192d6313,45181,1731710247742' ***** 2024-11-15T22:38:06,037 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T22:38:06,037 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45181-0x10140a559990002, quorum=127.0.0.1:50674, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:38:06,037 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:38:06,037 INFO [RS:0;e611192d6313:44569 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T22:38:06,038 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T22:38:06,038 INFO [RS:0;e611192d6313:44569 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T22:38:06,038 INFO [RS:0;e611192d6313:44569 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T22:38:06,038 INFO [RS:1;e611192d6313:45181 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T22:38:06,038 INFO [RS:0;e611192d6313:44569 {}] regionserver.HRegionServer(3091): Received CLOSE for 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:38:06,038 INFO [RS:1;e611192d6313:45181 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T22:38:06,038 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T22:38:06,038 INFO [RS:1;e611192d6313:45181 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T22:38:06,038 INFO [RS:1;e611192d6313:45181 {}] regionserver.HRegionServer(959): stopping server e611192d6313,45181,1731710247742 2024-11-15T22:38:06,038 INFO [RS:1;e611192d6313:45181 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T22:38:06,038 INFO [RS:1;e611192d6313:45181 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;e611192d6313:45181. 
2024-11-15T22:38:06,038 INFO [RS:0;e611192d6313:44569 {}] regionserver.HRegionServer(959): stopping server e611192d6313,44569,1731710246574 2024-11-15T22:38:06,038 INFO [RS:0;e611192d6313:44569 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T22:38:06,038 DEBUG [RS:1;e611192d6313:45181 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:38:06,038 INFO [RS:0;e611192d6313:44569 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e611192d6313:44569. 
2024-11-15T22:38:06,038 DEBUG [RS:1;e611192d6313:45181 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:38:06,038 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4876379c8ce36c0ab9ae28b3c1e67fd7, disabling compactions & flushes 2024-11-15T22:38:06,038 DEBUG [RS:0;e611192d6313:44569 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:38:06,038 INFO [RS:1;e611192d6313:45181 {}] regionserver.HRegionServer(976): stopping server e611192d6313,45181,1731710247742; all regions closed. 2024-11-15T22:38:06,038 DEBUG [RS:0;e611192d6313:44569 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:38:06,038 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. 2024-11-15T22:38:06,038 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. 2024-11-15T22:38:06,038 INFO [RS:0;e611192d6313:44569 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T22:38:06,038 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. after waiting 0 ms 2024-11-15T22:38:06,038 INFO [RS:0;e611192d6313:44569 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T22:38:06,038 INFO [RS:0;e611192d6313:44569 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T22:38:06,038 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. 
2024-11-15T22:38:06,039 INFO [RS:0;e611192d6313:44569 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T22:38:06,039 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:06,039 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:06,039 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:06,039 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:06,039 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:06,039 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/498a8a1fa8864f35bbad9a6d43eda13f, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/8485b65fe9364ce3990ce5282b82d942, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/ead7aca831e4431a8a56cd7ecef6030a, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/0c1ce5c069034113979cfaa51b16985b, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/b7fbc31b9d7c40e3b7af0df6916a8000, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/af2ce8e8276a425ab10df6de1714334e, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/ccde1d411eb145ab930458c373d1dcf3, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/4f9100590e2742c98a97ed3f2664eb9c, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/5f94a6c67e6e43caa9e16877e15f2128] to archive 2024-11-15T22:38:06,040 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-15T22:38:06,042 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/498a8a1fa8864f35bbad9a6d43eda13f to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/498a8a1fa8864f35bbad9a6d43eda13f 2024-11-15T22:38:06,043 INFO [RS:0;e611192d6313:44569 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T22:38:06,043 DEBUG [RS:0;e611192d6313:44569 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 4876379c8ce36c0ab9ae28b3c1e67fd7=TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.} 2024-11-15T22:38:06,043 DEBUG [RS:0;e611192d6313:44569 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4876379c8ce36c0ab9ae28b3c1e67fd7 2024-11-15T22:38:06,043 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T22:38:06,043 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T22:38:06,043 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/8485b65fe9364ce3990ce5282b82d942 to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/8485b65fe9364ce3990ce5282b82d942 2024-11-15T22:38:06,043 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T22:38:06,043 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T22:38:06,043 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:38:06,043 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T22:38:06,043 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:06,043 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 2024-11-15T22:38:06,043 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-15T22:38:06,044 WARN [IPC Server handler 0 on default port 41079 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 has not been closed. Lease recovery is in progress. RecoveryId = 1082 for block blk_1073741837_1013 2024-11-15T22:38:06,044 ERROR [FSHLog-0-hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25-prefix:e611192d6313,44569,1731710246574.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:06,044 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 after 1ms 2024-11-15T22:38:06,044 WARN [FSHLog-0-hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25-prefix:e611192d6313,44569,1731710246574.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:06,044 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e611192d6313%2C44569%2C1731710246574.meta:.meta(num 1731710247599) roll requested 2024-11-15T22:38:06,044 INFO [regionserver/e611192d6313:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C44569%2C1731710246574.meta.1731710286044.meta 2024-11-15T22:38:06,045 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/ead7aca831e4431a8a56cd7ecef6030a to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/ead7aca831e4431a8a56cd7ecef6030a 2024-11-15T22:38:06,046 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/0c1ce5c069034113979cfaa51b16985b to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/0c1ce5c069034113979cfaa51b16985b 2024-11-15T22:38:06,047 WARN [Thread-1041 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:38:06,047 WARN [Thread-1041 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741898_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK], DatanodeInfoWithStorage[127.0.0.1:32921,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 2024-11-15T22:38:06,047 WARN [Thread-1041 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741898_1083 2024-11-15T22:38:06,048 WARN [Thread-1041 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] 2024-11-15T22:38:06,048 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/b7fbc31b9d7c40e3b7af0df6916a8000 to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/b7fbc31b9d7c40e3b7af0df6916a8000 2024-11-15T22:38:06,050 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/af2ce8e8276a425ab10df6de1714334e to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/af2ce8e8276a425ab10df6de1714334e 2024-11-15T22:38:06,051 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/ccde1d411eb145ab930458c373d1dcf3 to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/ccde1d411eb145ab930458c373d1dcf3 2024-11-15T22:38:06,053 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/4f9100590e2742c98a97ed3f2664eb9c to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/4f9100590e2742c98a97ed3f2664eb9c 2024-11-15T22:38:06,053 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:06,053 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:06,053 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:06,053 INFO 
[sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:06,053 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:06,053 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710286044.meta 2024-11-15T22:38:06,054 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:06,054 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33791,DS-2cc1dc79-beaf-4fef-a5b0-8d43c36c8e39,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:38:06,054 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta 2024-11-15T22:38:06,054 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/5f94a6c67e6e43caa9e16877e15f2128 to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/info/5f94a6c67e6e43caa9e16877e15f2128 2024-11-15T22:38:06,054 WARN [IPC Server handler 4 on default port 41079 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta has not been closed. Lease recovery is in progress. RecoveryId = 1085 for block blk_1073741834_1010 2024-11-15T22:38:06,055 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta after 0ms 2024-11-15T22:38:06,054 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e611192d6313:46573 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-15T22:38:06,055 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33327:33327),(127.0.0.1/127.0.0.1:38631:38631)] 2024-11-15T22:38:06,055 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [498a8a1fa8864f35bbad9a6d43eda13f=10347, 8485b65fe9364ce3990ce5282b82d942=12506, ead7aca831e4431a8a56cd7ecef6030a=17994, 0c1ce5c069034113979cfaa51b16985b=6027, b7fbc31b9d7c40e3b7af0df6916a8000=6027, af2ce8e8276a425ab10df6de1714334e=18097, ccde1d411eb145ab930458c373d1dcf3=6027, 4f9100590e2742c98a97ed3f2664eb9c=9267, 5f94a6c67e6e43caa9e16877e15f2128=13583] 2024-11-15T22:38:06,055 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta is not closed yet, will try archiving it next time 2024-11-15T22:38:06,058 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4876379c8ce36c0ab9ae28b3c1e67fd7/recovered.edits/82.seqid, newMaxSeqId=82, maxSeqId=1 2024-11-15T22:38:06,059 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. 2024-11-15T22:38:06,059 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4876379c8ce36c0ab9ae28b3c1e67fd7: Waiting for close lock at 1731710286038Running coprocessor pre-close hooks at 1731710286038Disabling compacts and flushes for region at 1731710286038Disabling writes for close at 1731710286038Writing region close event to WAL at 1731710286055 (+17 ms)Running coprocessor post-close hooks at 1731710286059 (+4 ms)Closed at 1731710286059 2024-11-15T22:38:06,059 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7. 2024-11-15T22:38:06,069 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/.tmp/info/0221fb5771d24f8dab3b3dc8be46f586 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731710247869.4876379c8ce36c0ab9ae28b3c1e67fd7./info:regioninfo/1731710248245/Put/seqid=0 2024-11-15T22:38:06,071 WARN [Thread-1047 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:06,072 WARN [Thread-1047 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK], DatanodeInfoWithStorage[127.0.0.1:32921,DS-aa9da6b5-b12a-4db4-863a-9a7c5c3e76b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 2024-11-15T22:38:06,072 WARN [Thread-1047 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741900_1086 2024-11-15T22:38:06,072 WARN [Thread-1047 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] 2024-11-15T22:38:06,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741901_1087 (size=7089) 2024-11-15T22:38:06,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741901_1087 (size=7089) 2024-11-15T22:38:06,079 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/.tmp/info/0221fb5771d24f8dab3b3dc8be46f586 2024-11-15T22:38:06,100 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/.tmp/ns/00d4bb5b1cab4050933a13ff77e7045a is 43, key is default/ns:d/1731710247682/Put/seqid=0 2024-11-15T22:38:06,103 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1088 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35127 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:38:06,103 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:35842 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741902_1088] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data10]'}, localName='127.0.0.1:40635', datanodeUuid='9535c085-55f4-456b-b220-53fa517075d9', xmitsInProgress=0}:Exception transferring block BP-1428576717-172.17.0.3-1731710244171:blk_1073741902_1088 to mirror 127.0.0.1:35127 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:06,104 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741902_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK], DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 2024-11-15T22:38:06,104 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:35842 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741902_1088] {}] datanode.BlockReceiver(316): Block 1073741902 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T22:38:06,104 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741902_1088 2024-11-15T22:38:06,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:35842 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741902_1088] {}] datanode.DataXceiver(331): 127.0.0.1:40635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35842 dst: /127.0.0.1:40635 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:06,104 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] 2024-11-15T22:38:06,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741903_1089 (size=5153) 2024-11-15T22:38:06,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741903_1089 (size=5153) 2024-11-15T22:38:06,109 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/.tmp/ns/00d4bb5b1cab4050933a13ff77e7045a 2024-11-15T22:38:06,133 INFO [regionserver/e611192d6313:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T22:38:06,133 INFO [regionserver/e611192d6313:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T22:38:06,134 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/.tmp/table/705abaea111e4c128a7e664438b4fd6c is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731710248258/Put/seqid=0 2024-11-15T22:38:06,137 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1090 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35127 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:38:06,137 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:35860 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741904_1090] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data10]'}, localName='127.0.0.1:40635', datanodeUuid='9535c085-55f4-456b-b220-53fa517075d9', xmitsInProgress=0}:Exception transferring block BP-1428576717-172.17.0.3-1731710244171:blk_1073741904_1090 to mirror 127.0.0.1:35127 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:06,137 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1428576717-172.17.0.3-1731710244171:blk_1073741904_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40635,DS-d9c0e8f7-e65e-4b54-a077-3ab14498fd2c,DISK], DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK]) is bad. 2024-11-15T22:38:06,137 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-1428576717-172.17.0.3-1731710244171:blk_1073741904_1090 2024-11-15T22:38:06,137 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:35860 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741904_1090] {}] datanode.BlockReceiver(316): Block 1073741904 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T22:38:06,137 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_409341157_22 at /127.0.0.1:35860 [Receiving block BP-1428576717-172.17.0.3-1731710244171:blk_1073741904_1090] {}] datanode.DataXceiver(331): 127.0.0.1:40635:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35860 dst: /127.0.0.1:40635 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:06,138 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35127,DS-088677f4-f0db-4d2f-8741-897fa2c7056e,DISK] 2024-11-15T22:38:06,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741905_1091 (size=5424) 2024-11-15T22:38:06,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741905_1091 (size=5424) 2024-11-15T22:38:06,143 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/.tmp/table/705abaea111e4c128a7e664438b4fd6c 2024-11-15T22:38:06,150 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/.tmp/info/0221fb5771d24f8dab3b3dc8be46f586 as hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/info/0221fb5771d24f8dab3b3dc8be46f586 2024-11-15T22:38:06,156 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/info/0221fb5771d24f8dab3b3dc8be46f586, entries=10, sequenceid=11, filesize=6.9 K 2024-11-15T22:38:06,157 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/.tmp/ns/00d4bb5b1cab4050933a13ff77e7045a as hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/ns/00d4bb5b1cab4050933a13ff77e7045a 2024-11-15T22:38:06,164 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/ns/00d4bb5b1cab4050933a13ff77e7045a, entries=2, sequenceid=11, filesize=5.0 K 2024-11-15T22:38:06,165 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/.tmp/table/705abaea111e4c128a7e664438b4fd6c as hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/table/705abaea111e4c128a7e664438b4fd6c 
2024-11-15T22:38:06,171 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/table/705abaea111e4c128a7e664438b4fd6c, entries=2, sequenceid=11, filesize=5.3 K 2024-11-15T22:38:06,173 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 130ms, sequenceid=11, compaction requested=false 2024-11-15T22:38:06,178 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-15T22:38:06,178 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T22:38:06,178 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T22:38:06,179 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731710286043Running coprocessor pre-close hooks at 1731710286043Disabling compacts and flushes for region at 1731710286043Disabling writes for close at 1731710286043Obtaining lock to block concurrent updates at 1731710286043Preparing flush snapshotting stores in 1588230740 at 1731710286043Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731710286044 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731710286055 (+11 ms)Flushing 1588230740/info: creating writer at 1731710286055Flushing 1588230740/info: appending metadata at 1731710286069 (+14 ms)Flushing 1588230740/info: closing flushed file at 1731710286069Flushing 1588230740/ns: creating writer at 1731710286085 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731710286100 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731710286100Flushing 1588230740/table: creating writer at 1731710286116 (+16 ms)Flushing 1588230740/table: appending metadata at 1731710286133 (+17 ms)Flushing 1588230740/table: closing flushed file at 1731710286133Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@605fcfc1: reopening flushed file at 1731710286149 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f34e8dd: reopening flushed file at 1731710286156 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fcac241: reopening flushed file at 1731710286164 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 130ms, sequenceid=11, compaction requested=false at 1731710286173 (+9 ms)Writing region close event to WAL at 1731710286174 (+1 ms)Running coprocessor post-close hooks at 1731710286178 (+4 ms)Closed at 1731710286178 2024-11-15T22:38:06,179 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T22:38:06,243 INFO 
[RS:0;e611192d6313:44569 {}] regionserver.HRegionServer(976): stopping server e611192d6313,44569,1731710246574; all regions closed. 2024-11-15T22:38:06,244 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:06,244 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:06,244 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:06,244 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:06,244 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:06,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741899_1084 (size=825) 2024-11-15T22:38:06,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741899_1084 (size=825) 2024-11-15T22:38:06,842 INFO [regionserver/e611192d6313:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T22:38:06,843 INFO [regionserver/e611192d6313:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T22:38:07,062 INFO [regionserver/e611192d6313:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T22:38:07,349 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7ba8b02a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32921, datanodeUuid=581fc627-72c4-42db-bdf9-451bbec9abcb, infoPort=33327, infoSecurePort=0, ipcPort=37415, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171):Failed to transfer BP-1428576717-172.17.0.3-1731710244171:blk_1073741835_1011 to 127.0.0.1:35127 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:07,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741831_1007 (size=1321) 2024-11-15T22:38:07,841 INFO [regionserver/e611192d6313:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T22:38:08,309 INFO [master/e611192d6313:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-15T22:38:08,309 INFO [master/e611192d6313:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-15T22:38:08,347 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@419b5c7d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32921, datanodeUuid=581fc627-72c4-42db-bdf9-451bbec9abcb, infoPort=33327, infoSecurePort=0, ipcPort=37415, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171):Failed to transfer BP-1428576717-172.17.0.3-1731710244171:blk_1073741829_1005 to 127.0.0.1:35127 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:08,347 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7ba8b02a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32921, datanodeUuid=581fc627-72c4-42db-bdf9-451bbec9abcb, infoPort=33327, infoSecurePort=0, ipcPort=37415, storageInfo=lv=-57;cid=testClusterID;nsid=1662087254;c=1731710244171):Failed to transfer BP-1428576717-172.17.0.3-1731710244171:blk_1073741827_1003 to 127.0.0.1:35127 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T22:38:08,402 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath
2024-11-15T22:38:08,403 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-15T22:38:08,403 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-15T22:38:10,046 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 after 4003ms
2024-11-15T22:38:10,056 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta after 4002ms
2024-11-15T22:38:10,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741832_1008 (size=32)
2024-11-15T22:38:10,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741828_1004 (size=1189)
2024-11-15T22:38:10,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741877_1060 (size=12100)
2024-11-15T22:38:11,044 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-11-15T22:38:11,047 DEBUG [RS:1;e611192d6313:45181 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/oldWALs
2024-11-15T22:38:11,048 INFO [RS:1;e611192d6313:45181 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e611192d6313%2C45181%2C1731710247742:(num 1731710247973)
2024-11-15T22:38:11,048 DEBUG [RS:1;e611192d6313:45181 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-15T22:38:11,048 INFO [RS:1;e611192d6313:45181 {}] regionserver.LeaseManager(133): Closed leases
2024-11-15T22:38:11,048 INFO [RS:1;e611192d6313:45181 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-15T22:38:11,049 INFO [RS:1;e611192d6313:45181 {}] hbase.ChoreService(370): Chore service for: regionserver/e611192d6313:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-15T22:38:11,049 INFO [RS:1;e611192d6313:45181 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-15T22:38:11,049 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-15T22:38:11,049 INFO [RS:1;e611192d6313:45181 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-15T22:38:11,050 INFO [RS:1;e611192d6313:45181 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T22:38:11,050 INFO [RS:1;e611192d6313:45181 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T22:38:11,050 INFO [RS:1;e611192d6313:45181 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45181 2024-11-15T22:38:11,055 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:11,060 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:11,075 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:11,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:11,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:11,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:11,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:11,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:11,085 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:11,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T22:38:11,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45181-0x10140a559990002, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e611192d6313,45181,1731710247742 2024-11-15T22:38:11,120 INFO [RS:1;e611192d6313:45181 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T22:38:11,131 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e611192d6313,45181,1731710247742] 2024-11-15T22:38:11,141 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e611192d6313,45181,1731710247742 already deleted, retry=false 2024-11-15T22:38:11,141 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e611192d6313,45181,1731710247742 expired; onlineServers=1 2024-11-15T22:38:11,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45181-0x10140a559990002, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:38:11,231 INFO [RS:1;e611192d6313:45181 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T22:38:11,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45181-0x10140a559990002, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:38:11,231 INFO [RS:1;e611192d6313:45181 {}] regionserver.HRegionServer(1031): Exiting; stopping=e611192d6313,45181,1731710247742; zookeeper connection closed. 
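The isFileClosed stack traces above come from the WAL close path probing whether HDFS has finished closing the old WAL file after the writer was torn down. As a rough, hypothetical sketch of that probe pattern only (not the actual RecoverLeaseFSUtils code; the class and method names below are invented for illustration, and the timeout handling is simplified), the idea is: ask the NameNode to recover the file's lease, then poll DistributedFileSystem.isFileClosed() until it reports the file closed or a deadline passes.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Illustrative sketch only; names and timeouts are assumptions, not HBase's implementation.
    public class WalLeaseProbe {
      static boolean waitForClose(DistributedFileSystem dfs, Path wal, long deadlineMs)
          throws Exception {
        boolean closed = dfs.recoverLease(wal);        // ask the NameNode to start lease recovery
        long end = System.currentTimeMillis() + deadlineMs;
        while (!closed && System.currentTimeMillis() < end) {
          Thread.sleep(1000);                          // back off between probes
          closed = dfs.isFileClosed(wal);              // the call that is failing in the traces above
        }
        return closed;
      }
    }

In the log above this probe fails because the underlying DFSClient has already been shut down ("Filesystem closed") or the WAL file has already been moved, so isFileClosed() throws instead of ever returning true.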
2024-11-15T22:38:11,232 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4655b128 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4655b128 2024-11-15T22:38:11,245 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-15T22:38:11,254 DEBUG [RS:0;e611192d6313:44569 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/oldWALs 2024-11-15T22:38:11,254 INFO [RS:0;e611192d6313:44569 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e611192d6313%2C44569%2C1731710246574.meta:.meta(num 1731710286044) 2024-11-15T22:38:11,255 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:11,255 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:11,255 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:11,255 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:11,255 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:11,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741892_1076 (size=16308) 2024-11-15T22:38:11,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741892_1076 (size=16308) 2024-11-15T22:38:11,261 DEBUG [RS:0;e611192d6313:44569 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/oldWALs 2024-11-15T22:38:11,261 INFO [RS:0;e611192d6313:44569 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e611192d6313%2C44569%2C1731710246574:(num 1731710285518) 2024-11-15T22:38:11,261 DEBUG [RS:0;e611192d6313:44569 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:38:11,261 INFO [RS:0;e611192d6313:44569 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T22:38:11,261 INFO [RS:0;e611192d6313:44569 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T22:38:11,262 INFO [RS:0;e611192d6313:44569 {}] hbase.ChoreService(370): Chore service for: regionserver/e611192d6313:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T22:38:11,262 INFO [RS:0;e611192d6313:44569 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T22:38:11,262 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
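The ERROR from wal.AbstractFSWAL(2118) above explicitly points at the configuration key "hbase.wal.fshlog.wait.on.shutdown.seconds" when the async writer does not close within the default 5 seconds. A minimal, hypothetical way to raise that wait programmatically is sketched below; the value 30 is an arbitrary example, and in a real deployment the same property would normally be set in hbase-site.xml instead.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Give the WAL shutdown path longer than the default 5 seconds to close the async writer.
        // 30 is an illustrative value, not a recommendation from this log.
        conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
        System.out.println(conf.get("hbase.wal.fshlog.wait.on.shutdown.seconds"));
      }
    }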
2024-11-15T22:38:11,262 INFO [RS:0;e611192d6313:44569 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44569 2024-11-15T22:38:11,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T22:38:11,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e611192d6313,44569,1731710246574 2024-11-15T22:38:11,275 INFO [RS:0;e611192d6313:44569 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T22:38:11,286 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e611192d6313,44569,1731710246574] 2024-11-15T22:38:11,296 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e611192d6313,44569,1731710246574 already deleted, retry=false 2024-11-15T22:38:11,296 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e611192d6313,44569,1731710246574 expired; onlineServers=0 2024-11-15T22:38:11,296 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e611192d6313,46573,1731710246399' ***** 2024-11-15T22:38:11,296 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T22:38:11,296 INFO [M:0;e611192d6313:46573 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T22:38:11,297 INFO [M:0;e611192d6313:46573 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T22:38:11,297 DEBUG [M:0;e611192d6313:46573 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T22:38:11,297 DEBUG [M:0;e611192d6313:46573 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T22:38:11,297 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T22:38:11,297 DEBUG [master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710246918 {}] cleaner.HFileCleaner(306): Exit Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710246918,5,FailOnTimeoutGroup] 2024-11-15T22:38:11,297 DEBUG [master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710246918 {}] cleaner.HFileCleaner(306): Exit Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710246918,5,FailOnTimeoutGroup] 2024-11-15T22:38:11,297 INFO [M:0;e611192d6313:46573 {}] hbase.ChoreService(370): Chore service for: master/e611192d6313:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T22:38:11,298 INFO [M:0;e611192d6313:46573 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T22:38:11,298 DEBUG [M:0;e611192d6313:46573 {}] master.HMaster(1795): Stopping service threads 2024-11-15T22:38:11,298 INFO [M:0;e611192d6313:46573 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T22:38:11,298 INFO [M:0;e611192d6313:46573 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T22:38:11,298 INFO [M:0;e611192d6313:46573 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T22:38:11,298 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T22:38:11,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T22:38:11,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:11,310 DEBUG [M:0;e611192d6313:46573 {}] zookeeper.ZKUtil(347): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T22:38:11,310 WARN [M:0;e611192d6313:46573 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T22:38:11,311 INFO [M:0;e611192d6313:46573 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/.lastflushedseqids 2024-11-15T22:38:11,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741906_1092 (size=130) 2024-11-15T22:38:11,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741906_1092 (size=130) 2024-11-15T22:38:11,324 INFO [M:0;e611192d6313:46573 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T22:38:11,324 INFO [M:0;e611192d6313:46573 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T22:38:11,324 DEBUG [M:0;e611192d6313:46573 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T22:38:11,324 INFO [M:0;e611192d6313:46573 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:38:11,325 DEBUG [M:0;e611192d6313:46573 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:38:11,325 DEBUG [M:0;e611192d6313:46573 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T22:38:11,325 DEBUG [M:0;e611192d6313:46573 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:38:11,325 INFO [M:0;e611192d6313:46573 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.24 KB heapSize=29.47 KB 2024-11-15T22:38:11,339 DEBUG [M:0;e611192d6313:46573 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/91f76821e2b54750a4588deb33502d48 is 82, key is hbase:meta,,1/info:regioninfo/1731710247633/Put/seqid=0 2024-11-15T22:38:11,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741907_1093 (size=5672) 2024-11-15T22:38:11,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741907_1093 (size=5672) 2024-11-15T22:38:11,344 INFO [M:0;e611192d6313:46573 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/91f76821e2b54750a4588deb33502d48 2024-11-15T22:38:11,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741826_1002 (size=42) 2024-11-15T22:38:11,363 DEBUG [M:0;e611192d6313:46573 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b82b3b68180247f2a2f9d6d4b2ecb301 is 773, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731710248263/Put/seqid=0 2024-11-15T22:38:11,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741908_1094 (size=6254) 2024-11-15T22:38:11,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741908_1094 (size=6254) 2024-11-15T22:38:11,368 INFO [M:0;e611192d6313:46573 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b82b3b68180247f2a2f9d6d4b2ecb301 2024-11-15T22:38:11,373 INFO [M:0;e611192d6313:46573 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b82b3b68180247f2a2f9d6d4b2ecb301 2024-11-15T22:38:11,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-15T22:38:11,386 INFO [RS:0;e611192d6313:44569 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T22:38:11,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44569-0x10140a559990001, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:38:11,386 INFO [RS:0;e611192d6313:44569 {}] regionserver.HRegionServer(1031): Exiting; stopping=e611192d6313,44569,1731710246574; zookeeper connection closed. 2024-11-15T22:38:11,386 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5e4de769 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5e4de769 2024-11-15T22:38:11,386 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-15T22:38:11,387 DEBUG [M:0;e611192d6313:46573 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/49fa1a5f99904ac79d37a61555e36159 is 69, key is e611192d6313,44569,1731710246574/rs:state/1731710247033/Put/seqid=0 2024-11-15T22:38:11,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741909_1095 (size=5224) 2024-11-15T22:38:11,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741909_1095 (size=5224) 2024-11-15T22:38:11,392 INFO [M:0;e611192d6313:46573 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/49fa1a5f99904ac79d37a61555e36159 2024-11-15T22:38:11,410 DEBUG [M:0;e611192d6313:46573 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fb190e45dda24cb2b6607c0fcf8953fe is 52, key is load_balancer_on/state:d/1731710247726/Put/seqid=0 2024-11-15T22:38:11,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741910_1096 (size=5056) 2024-11-15T22:38:11,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741910_1096 (size=5056) 2024-11-15T22:38:11,415 INFO [M:0;e611192d6313:46573 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fb190e45dda24cb2b6607c0fcf8953fe 2024-11-15T22:38:11,420 DEBUG [M:0;e611192d6313:46573 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/91f76821e2b54750a4588deb33502d48 as hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/91f76821e2b54750a4588deb33502d48 2024-11-15T22:38:11,426 INFO 
[M:0;e611192d6313:46573 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/91f76821e2b54750a4588deb33502d48, entries=8, sequenceid=60, filesize=5.5 K 2024-11-15T22:38:11,426 DEBUG [M:0;e611192d6313:46573 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b82b3b68180247f2a2f9d6d4b2ecb301 as hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b82b3b68180247f2a2f9d6d4b2ecb301 2024-11-15T22:38:11,431 INFO [M:0;e611192d6313:46573 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b82b3b68180247f2a2f9d6d4b2ecb301 2024-11-15T22:38:11,431 INFO [M:0;e611192d6313:46573 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b82b3b68180247f2a2f9d6d4b2ecb301, entries=6, sequenceid=60, filesize=6.1 K 2024-11-15T22:38:11,432 DEBUG [M:0;e611192d6313:46573 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/49fa1a5f99904ac79d37a61555e36159 as hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/49fa1a5f99904ac79d37a61555e36159 2024-11-15T22:38:11,437 INFO [M:0;e611192d6313:46573 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/49fa1a5f99904ac79d37a61555e36159, entries=2, sequenceid=60, filesize=5.1 K 2024-11-15T22:38:11,438 DEBUG [M:0;e611192d6313:46573 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fb190e45dda24cb2b6607c0fcf8953fe as hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fb190e45dda24cb2b6607c0fcf8953fe 2024-11-15T22:38:11,443 INFO [M:0;e611192d6313:46573 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fb190e45dda24cb2b6607c0fcf8953fe, entries=1, sequenceid=60, filesize=4.9 K 2024-11-15T22:38:11,444 INFO [M:0;e611192d6313:46573 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=60, compaction requested=false 2024-11-15T22:38:11,445 INFO [M:0;e611192d6313:46573 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T22:38:11,445 DEBUG [M:0;e611192d6313:46573 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731710291324Disabling compacts and flushes for region at 1731710291324Disabling writes for close at 1731710291325 (+1 ms)Obtaining lock to block concurrent updates at 1731710291325Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731710291325Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23793, getHeapSize=30112, getOffHeapSize=0, getCellsCount=71 at 1731710291325Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731710291326 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731710291326Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731710291338 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731710291338Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731710291350 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731710291362 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731710291362Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731710291373 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731710291387 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731710291387Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731710291397 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731710291410 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731710291410Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7de1e864: reopening flushed file at 1731710291420 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44628b65: reopening flushed file at 1731710291426 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4533b4ac: reopening flushed file at 1731710291431 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1da6ec75: reopening flushed file at 1731710291437 (+6 ms)Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=60, compaction requested=false at 1731710291444 (+7 ms)Writing region close event to WAL at 1731710291445 (+1 ms)Closed at 1731710291445 2024-11-15T22:38:11,446 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:11,447 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:11,447 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:11,447 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:11,447 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:11,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741891_1074 (size=1045) 2024-11-15T22:38:11,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40635 is added to blk_1073741891_1074 (size=1045) 2024-11-15T22:38:11,586 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T22:38:11,606 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:11,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:11,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:11,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:11,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:11,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:11,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:11,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:12,057 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:12,058 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:12,370 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@317b5da {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1428576717-172.17.0.3-1731710244171:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:33791,null,null]) java.net.ConnectException: Call From e611192d6313/172.17.0.3 to localhost:39447 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-15T22:38:12,964 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/WALs/e611192d6313,46573,1731710246399/e611192d6313%2C46573%2C1731710246399.1731710246704 to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/oldWALs/e611192d6313%2C46573%2C1731710246399.1731710246704 2024-11-15T22:38:12,974 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/MasterData/oldWALs/e611192d6313%2C46573%2C1731710246399.1731710246704 to hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/oldWALs/e611192d6313%2C46573%2C1731710246399.1731710246704$masterlocalwal$ 2024-11-15T22:38:12,974 INFO [M:0;e611192d6313:46573 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T22:38:12,974 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T22:38:12,974 INFO [M:0;e611192d6313:46573 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46573 2024-11-15T22:38:12,974 INFO [M:0;e611192d6313:46573 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T22:38:13,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:38:13,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:38:13,126 INFO [M:0;e611192d6313:46573 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T22:38:13,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:38:13,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46573-0x10140a559990000, quorum=127.0.0.1:50674, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:38:13,133 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30d4c51a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:38:13,134 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34827478{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:38:13,134 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:38:13,135 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3edaa0c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:38:13,135 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@796aacdc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.log.dir/,STOPPED} 2024-11-15T22:38:13,138 WARN [BP-1428576717-172.17.0.3-1731710244171 heartbeating to localhost/127.0.0.1:41079 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:38:13,139 WARN [BP-1428576717-172.17.0.3-1731710244171 heartbeating to localhost/127.0.0.1:41079 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1428576717-172.17.0.3-1731710244171 (Datanode Uuid 581fc627-72c4-42db-bdf9-451bbec9abcb) service to localhost/127.0.0.1:41079 2024-11-15T22:38:13,138 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T22:38:13,139 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:38:13,138 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7f1c9f21 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1428576717-172.17.0.3-1731710244171:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:33791,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:39447 , LocalHost:localPort e611192d6313/172.17.0.3:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-15T22:38:13,139 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7f1c9f21 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1428576717-172.17.0.3-1731710244171:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:32921,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1428576717-172.17.0.3-1731710244171 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:13,139 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7f1c9f21 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1428576717-172.17.0.3-1731710244171:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:33791,null,null], DatanodeInfoWithStorage[127.0.0.1:32921,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1428576717-172.17.0.3-1731710244171:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:33791,null,null], DatanodeInfoWithStorage[127.0.0.1:32921,null,null]] 2024-11-15T22:38:13,139 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7f1c9f21 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1428576717-172.17.0.3-1731710244171:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:33791,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1428576717-172.17.0.3-1731710244171 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:13,139 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7f1c9f21 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1428576717-172.17.0.3-1731710244171:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:32921,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1428576717-172.17.0.3-1731710244171 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:13,139 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data3/current/BP-1428576717-172.17.0.3-1731710244171 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:38:13,140 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7f1c9f21 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1428576717-172.17.0.3-1731710244171:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:33791,null,null], DatanodeInfoWithStorage[127.0.0.1:32921,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1428576717-172.17.0.3-1731710244171:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:33791,null,null], DatanodeInfoWithStorage[127.0.0.1:32921,null,null]] 2024-11-15T22:38:13,140 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data4/current/BP-1428576717-172.17.0.3-1731710244171 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:38:13,140 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:38:13,143 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5dd0b56c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:38:13,143 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3740407e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:38:13,143 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:38:13,143 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d4ec789{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:38:13,144 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@792fa80c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.log.dir/,STOPPED} 2024-11-15T22:38:13,144 WARN [BP-1428576717-172.17.0.3-1731710244171 heartbeating to localhost/127.0.0.1:41079 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:38:13,144 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and 
exit. 2024-11-15T22:38:13,144 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:38:13,144 WARN [BP-1428576717-172.17.0.3-1731710244171 heartbeating to localhost/127.0.0.1:41079 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1428576717-172.17.0.3-1731710244171 (Datanode Uuid 9535c085-55f4-456b-b220-53fa517075d9) service to localhost/127.0.0.1:41079 2024-11-15T22:38:13,145 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data9/current/BP-1428576717-172.17.0.3-1731710244171 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:38:13,145 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/cluster_199669c5-f772-4003-455b-110ce67a79b3/data/data10/current/BP-1428576717-172.17.0.3-1731710244171 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:38:13,145 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:38:13,150 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@68a89b56{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T22:38:13,151 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@229a8eec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:38:13,151 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:38:13,151 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4478d7de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:38:13,151 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65f2c48f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.log.dir/,STOPPED} 2024-11-15T22:38:13,160 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T22:38:13,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T22:38:13,213 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=157 (was 83) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41079 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41079 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:41079 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41079 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41079 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41079 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41079 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41079 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f854cbefdc8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41079 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:36437 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f854cbefdc8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36437 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41079 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:41079 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=452 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=112 (was 70) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4163 (was 4655) 2024-11-15T22:38:13,221 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=157, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=112, ProcessCount=11, AvailableMemoryMB=4164 2024-11-15T22:38:13,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T22:38:13,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.log.dir so I do NOT create it in target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34 2024-11-15T22:38:13,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4867067f-b67d-7264-ece4-5a61857a2385/hadoop.tmp.dir so I do NOT create it in target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34 2024-11-15T22:38:13,222 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465, deleteOnExit=true 2024-11-15T22:38:13,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T22:38:13,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/test.cache.data in system properties and HBase conf 2024-11-15T22:38:13,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T22:38:13,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.log.dir in system properties and HBase conf 2024-11-15T22:38:13,223 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T22:38:13,223 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T22:38:13,223 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T22:38:13,223 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-15T22:38:13,223 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T22:38:13,223 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T22:38:13,223 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T22:38:13,223 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T22:38:13,223 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T22:38:13,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T22:38:13,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T22:38:13,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T22:38:13,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 
2024-11-15T22:38:13,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/nfs.dump.dir in system properties and HBase conf 2024-11-15T22:38:13,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/java.io.tmpdir in system properties and HBase conf 2024-11-15T22:38:13,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T22:38:13,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T22:38:13,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T22:38:13,235 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T22:38:13,608 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:38:13,613 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:38:13,614 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:38:13,614 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:38:13,614 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T22:38:13,615 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:38:13,615 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7091f2a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:38:13,615 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42aa99e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:38:13,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62bbed65{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/java.io.tmpdir/jetty-localhost-33907-hadoop-hdfs-3_4_1-tests_jar-_-any-15807337823697945743/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T22:38:13,707 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2940e1de{HTTP/1.1, (http/1.1)}{localhost:33907} 2024-11-15T22:38:13,707 INFO [Time-limited test {}] server.Server(415): Started @151865ms 2024-11-15T22:38:13,718 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T22:38:13,969 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:38:13,972 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:38:13,973 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:38:13,973 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:38:13,973 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T22:38:13,974 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7992aa88{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:38:13,974 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a75563d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:38:14,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:38:14,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:14,068 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@37b5496d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/java.io.tmpdir/jetty-localhost-39017-hadoop-hdfs-3_4_1-tests_jar-_-any-2324291173861449119/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:38:14,068 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@8825f29{HTTP/1.1, (http/1.1)}{localhost:39017} 2024-11-15T22:38:14,069 INFO [Time-limited test {}] server.Server(415): Started @152226ms 2024-11-15T22:38:14,070 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:38:14,097 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:38:14,101 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:38:14,101 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:38:14,102 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:38:14,102 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T22:38:14,102 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@126bd190{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:38:14,103 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ccd5f6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:38:14,195 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ad6192b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/java.io.tmpdir/jetty-localhost-39311-hadoop-hdfs-3_4_1-tests_jar-_-any-15190927193968925719/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:38:14,195 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@644dfd01{HTTP/1.1, (http/1.1)}{localhost:39311} 2024-11-15T22:38:14,195 INFO [Time-limited test {}] server.Server(415): Started @152353ms 2024-11-15T22:38:14,197 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:38:15,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:15,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:38:15,539 WARN [Thread-1198 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/data/data1/current/BP-1285903154-172.17.0.3-1731710293246/current, will proceed with Du for space computation calculation, 2024-11-15T22:38:15,539 WARN [Thread-1199 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/data/data2/current/BP-1285903154-172.17.0.3-1731710293246/current, will proceed with Du for space computation calculation, 2024-11-15T22:38:15,558 WARN [Thread-1162 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T22:38:15,561 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6c8979059d56ea07 with lease ID 0xd3009b2580b17147: Processing first storage report for DS-9e21222b-34c7-4b05-98c9-464b2da7eff3 from datanode DatanodeRegistration(127.0.0.1:33199, datanodeUuid=6f378189-230f-48c9-8213-c8e5fdaa10d0, infoPort=45869, infoSecurePort=0, ipcPort=41119, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246) 2024-11-15T22:38:15,561 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6c8979059d56ea07 with lease ID 0xd3009b2580b17147: from storage DS-9e21222b-34c7-4b05-98c9-464b2da7eff3 node DatanodeRegistration(127.0.0.1:33199, datanodeUuid=6f378189-230f-48c9-8213-c8e5fdaa10d0, infoPort=45869, infoSecurePort=0, ipcPort=41119, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:38:15,561 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6c8979059d56ea07 with lease ID 0xd3009b2580b17147: Processing first storage report for DS-1c1b8f57-536b-41f0-8ecb-f347d1964b55 from datanode DatanodeRegistration(127.0.0.1:33199, datanodeUuid=6f378189-230f-48c9-8213-c8e5fdaa10d0, infoPort=45869, infoSecurePort=0, ipcPort=41119, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246) 2024-11-15T22:38:15,561 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6c8979059d56ea07 with lease ID 0xd3009b2580b17147: from storage DS-1c1b8f57-536b-41f0-8ecb-f347d1964b55 node DatanodeRegistration(127.0.0.1:33199, datanodeUuid=6f378189-230f-48c9-8213-c8e5fdaa10d0, infoPort=45869, infoSecurePort=0, ipcPort=41119, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:38:15,684 WARN [Thread-1209 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/data/data3/current/BP-1285903154-172.17.0.3-1731710293246/current, will proceed with Du for space computation calculation, 2024-11-15T22:38:15,685 WARN [Thread-1210 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/data/data4/current/BP-1285903154-172.17.0.3-1731710293246/current, will proceed with Du for space computation calculation, 2024-11-15T22:38:15,705 WARN [Thread-1185 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T22:38:15,707 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x19f8e8828e52e247 with lease ID 0xd3009b2580b17148: Processing first storage report for DS-3d6e597f-4928-42c9-ac0c-688bac1495bc from datanode DatanodeRegistration(127.0.0.1:41555, datanodeUuid=1e8bc306-eb2e-4f91-934b-c931e98c67bd, infoPort=33675, infoSecurePort=0, ipcPort=45773, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246) 2024-11-15T22:38:15,707 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x19f8e8828e52e247 with lease ID 0xd3009b2580b17148: from storage DS-3d6e597f-4928-42c9-ac0c-688bac1495bc node DatanodeRegistration(127.0.0.1:41555, datanodeUuid=1e8bc306-eb2e-4f91-934b-c931e98c67bd, infoPort=33675, infoSecurePort=0, ipcPort=45773, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:38:15,707 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x19f8e8828e52e247 with lease ID 0xd3009b2580b17148: Processing first storage report for DS-44638cc2-8414-4115-b5fe-7f982864d593 from datanode DatanodeRegistration(127.0.0.1:41555, datanodeUuid=1e8bc306-eb2e-4f91-934b-c931e98c67bd, infoPort=33675, infoSecurePort=0, ipcPort=45773, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246) 2024-11-15T22:38:15,707 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x19f8e8828e52e247 with lease ID 0xd3009b2580b17148: from storage DS-44638cc2-8414-4115-b5fe-7f982864d593 node DatanodeRegistration(127.0.0.1:41555, datanodeUuid=1e8bc306-eb2e-4f91-934b-c931e98c67bd, infoPort=33675, infoSecurePort=0, ipcPort=45773, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:38:15,732 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34 2024-11-15T22:38:15,736 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/zookeeper_0, clientPort=55589, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T22:38:15,737 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55589 2024-11-15T22:38:15,737 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:38:15,739 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:38:15,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41555 is added to blk_1073741825_1001 (size=7) 2024-11-15T22:38:15,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33199 is added to blk_1073741825_1001 (size=7) 2024-11-15T22:38:15,753 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2 with version=8 2024-11-15T22:38:15,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/hbase-staging 2024-11-15T22:38:15,756 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e611192d6313:0 server-side Connection retries=45 2024-11-15T22:38:15,756 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:38:15,756 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T22:38:15,756 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T22:38:15,756 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:38:15,756 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T22:38:15,756 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T22:38:15,756 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T22:38:15,757 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34355 2024-11-15T22:38:15,759 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34355 connecting to ZooKeeper ensemble=127.0.0.1:55589 2024-11-15T22:38:15,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:343550x0, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-15T22:38:15,812 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34355-0x10140a61a620000 connected 2024-11-15T22:38:15,900 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:38:15,905 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:38:15,911 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:38:15,911 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2, hbase.cluster.distributed=false 2024-11-15T22:38:15,912 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T22:38:15,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34355 2024-11-15T22:38:15,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34355 2024-11-15T22:38:15,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34355 2024-11-15T22:38:15,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34355 2024-11-15T22:38:15,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34355 2024-11-15T22:38:15,927 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e611192d6313:0 server-side Connection retries=45 2024-11-15T22:38:15,927 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:38:15,927 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T22:38:15,927 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T22:38:15,927 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:38:15,927 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T22:38:15,927 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T22:38:15,927 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T22:38:15,928 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39125 2024-11-15T22:38:15,929 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39125 connecting to ZooKeeper ensemble=127.0.0.1:55589 2024-11-15T22:38:15,930 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:38:15,931 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:38:15,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:391250x0, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T22:38:15,941 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:391250x0, quorum=127.0.0.1:55589, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:38:15,941 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39125-0x10140a61a620001 connected 2024-11-15T22:38:15,941 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T22:38:15,942 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T22:38:15,942 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T22:38:15,943 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T22:38:15,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39125 2024-11-15T22:38:15,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39125 2024-11-15T22:38:15,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39125 2024-11-15T22:38:15,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39125 2024-11-15T22:38:15,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39125 2024-11-15T22:38:15,957 DEBUG [M:0;e611192d6313:34355 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e611192d6313:34355 2024-11-15T22:38:15,957 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e611192d6313,34355,1731710295755 2024-11-15T22:38:15,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-11-15T22:38:15,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:38:15,970 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e611192d6313,34355,1731710295755 2024-11-15T22:38:15,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:15,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T22:38:15,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:15,984 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T22:38:15,985 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e611192d6313,34355,1731710295755 from backup master directory 2024-11-15T22:38:15,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e611192d6313,34355,1731710295755 2024-11-15T22:38:15,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:38:15,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:38:15,994 WARN [master/e611192d6313:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-15T22:38:15,994 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e611192d6313,34355,1731710295755 2024-11-15T22:38:16,003 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/hbase.id] with ID: 704e1a17-56a0-43d1-a7e8-8ba04eeddcad 2024-11-15T22:38:16,004 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/.tmp/hbase.id 2024-11-15T22:38:16,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41555 is added to blk_1073741826_1002 (size=42) 2024-11-15T22:38:16,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33199 is added to blk_1073741826_1002 (size=42) 2024-11-15T22:38:16,012 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/.tmp/hbase.id]:[hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/hbase.id] 2024-11-15T22:38:16,024 INFO [master/e611192d6313:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:38:16,024 INFO [master/e611192d6313:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T22:38:16,026 INFO [master/e611192d6313:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-15T22:38:16,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:16,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:16,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33199 is added to blk_1073741827_1003 (size=196) 2024-11-15T22:38:16,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41555 is added to blk_1073741827_1003 (size=196) 2024-11-15T22:38:16,042 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T22:38:16,043 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T22:38:16,043 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:38:16,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33199 is added to blk_1073741828_1004 (size=1189) 2024-11-15T22:38:16,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41555 is added to blk_1073741828_1004 (size=1189) 2024-11-15T22:38:16,051 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store 2024-11-15T22:38:16,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33199 is added to blk_1073741829_1005 (size=34) 2024-11-15T22:38:16,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41555 is added to blk_1073741829_1005 (size=34) 2024-11-15T22:38:16,058 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:38:16,058 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T22:38:16,058 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:38:16,058 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:38:16,058 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T22:38:16,058 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:38:16,058 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T22:38:16,058 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731710296058Disabling compacts and flushes for region at 1731710296058Disabling writes for close at 1731710296058Writing region close event to WAL at 1731710296058Closed at 1731710296058 2024-11-15T22:38:16,059 WARN [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/.initializing 2024-11-15T22:38:16,059 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/WALs/e611192d6313,34355,1731710295755 2024-11-15T22:38:16,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:38:16,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:38:16,062 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C34355%2C1731710295755, suffix=, logDir=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/WALs/e611192d6313,34355,1731710295755, archiveDir=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/oldWALs, maxLogs=10 2024-11-15T22:38:16,062 INFO [master/e611192d6313:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C34355%2C1731710295755.1731710296062 2024-11-15T22:38:16,067 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/WALs/e611192d6313,34355,1731710295755/e611192d6313%2C34355%2C1731710295755.1731710296062 2024-11-15T22:38:16,068 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33675:33675),(127.0.0.1/127.0.0.1:45869:45869)] 2024-11-15T22:38:16,071 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:38:16,072 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:38:16,072 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:16,072 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:16,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:16,075 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T22:38:16,075 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:16,076 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): 
Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:16,076 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:16,077 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T22:38:16,077 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:16,078 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:38:16,078 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:16,079 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T22:38:16,079 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:16,080 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=NONE, compression=NONE 2024-11-15T22:38:16,080 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:16,081 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T22:38:16,081 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:16,081 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:38:16,081 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:16,082 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:16,082 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:16,083 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:16,083 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:16,084 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-15T22:38:16,085 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:16,088 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:38:16,089 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=730644, jitterRate=-0.0709386020898819}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T22:38:16,089 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731710296072Initializing all the Stores at 1731710296073 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710296073Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710296074 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710296074Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710296074Cleaning up temporary data from old regions at 1731710296083 (+9 ms)Region opened successfully at 1731710296089 (+6 ms) 2024-11-15T22:38:16,090 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T22:38:16,093 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ad83416, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e611192d6313/172.17.0.3:0 2024-11-15T22:38:16,094 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-15T22:38:16,094 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T22:38:16,094 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T22:38:16,095 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T22:38:16,095 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T22:38:16,095 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T22:38:16,095 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T22:38:16,097 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T22:38:16,098 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T22:38:16,109 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T22:38:16,109 INFO [master/e611192d6313:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T22:38:16,110 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T22:38:16,120 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T22:38:16,120 INFO [master/e611192d6313:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T22:38:16,121 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T22:38:16,130 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T22:38:16,131 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T22:38:16,141 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T22:38:16,144 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T22:38:16,151 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T22:38:16,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T22:38:16,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T22:38:16,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:16,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:16,162 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e611192d6313,34355,1731710295755, sessionid=0x10140a61a620000, setting cluster-up flag (Was=false) 2024-11-15T22:38:16,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:16,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:16,215 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T22:38:16,216 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e611192d6313,34355,1731710295755 2024-11-15T22:38:16,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:16,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:16,267 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T22:38:16,268 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e611192d6313,34355,1731710295755 2024-11-15T22:38:16,270 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at 
hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T22:38:16,271 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T22:38:16,272 INFO [master/e611192d6313:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T22:38:16,272 INFO [master/e611192d6313:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T22:38:16,272 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e611192d6313,34355,1731710295755 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T22:38:16,273 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:38:16,273 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:38:16,273 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:38:16,273 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:38:16,273 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e611192d6313:0, corePoolSize=10, maxPoolSize=10 2024-11-15T22:38:16,273 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:16,273 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e611192d6313:0, corePoolSize=2, maxPoolSize=2 2024-11-15T22:38:16,274 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:16,275 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; 
timeout=30000, timestamp=1731710326275 2024-11-15T22:38:16,275 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T22:38:16,275 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T22:38:16,275 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T22:38:16,275 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T22:38:16,275 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T22:38:16,275 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T22:38:16,275 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:16,276 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T22:38:16,276 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T22:38:16,276 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:38:16,276 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T22:38:16,276 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T22:38:16,276 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T22:38:16,276 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T22:38:16,276 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710296276,5,FailOnTimeoutGroup] 2024-11-15T22:38:16,276 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710296276,5,FailOnTimeoutGroup] 2024-11-15T22:38:16,276 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:16,276 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T22:38:16,276 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
2024-11-15T22:38:16,276 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:16,277 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:16,277 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T22:38:16,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41555 is added to blk_1073741831_1007 (size=1321) 2024-11-15T22:38:16,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33199 is added to blk_1073741831_1007 (size=1321) 2024-11-15T22:38:16,285 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T22:38:16,285 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2 2024-11-15T22:38:16,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41555 is added to blk_1073741832_1008 (size=32) 2024-11-15T22:38:16,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33199 is added to blk_1073741832_1008 (size=32) 2024-11-15T22:38:16,292 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:38:16,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T22:38:16,295 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T22:38:16,295 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:16,295 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:16,296 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T22:38:16,297 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T22:38:16,297 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:16,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:16,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T22:38:16,299 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T22:38:16,299 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:16,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:16,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T22:38:16,300 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T22:38:16,300 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:16,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:16,301 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T22:38:16,302 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740 2024-11-15T22:38:16,302 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740 2024-11-15T22:38:16,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T22:38:16,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T22:38:16,303 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T22:38:16,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T22:38:16,306 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:38:16,307 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718675, jitterRate=-0.08615833520889282}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T22:38:16,307 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731710296293Initializing all the Stores at 1731710296293Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710296293Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710296293Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY 
=> 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710296293Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710296293Cleaning up temporary data from old regions at 1731710296303 (+10 ms)Region opened successfully at 1731710296307 (+4 ms) 2024-11-15T22:38:16,307 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T22:38:16,307 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T22:38:16,307 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T22:38:16,307 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T22:38:16,307 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T22:38:16,308 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T22:38:16,308 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731710296307Disabling compacts and flushes for region at 1731710296307Disabling writes for close at 1731710296307Writing region close event to WAL at 1731710296308 (+1 ms)Closed at 1731710296308 2024-11-15T22:38:16,309 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:38:16,309 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T22:38:16,309 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T22:38:16,310 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T22:38:16,311 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T22:38:16,346 INFO [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer(746): ClusterId : 704e1a17-56a0-43d1-a7e8-8ba04eeddcad 2024-11-15T22:38:16,346 DEBUG [RS:0;e611192d6313:39125 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T22:38:16,360 DEBUG [RS:0;e611192d6313:39125 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T22:38:16,360 DEBUG [RS:0;e611192d6313:39125 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T22:38:16,374 DEBUG [RS:0;e611192d6313:39125 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 
2024-11-15T22:38:16,374 DEBUG [RS:0;e611192d6313:39125 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55fe3d8e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e611192d6313/172.17.0.3:0 2024-11-15T22:38:16,389 DEBUG [RS:0;e611192d6313:39125 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e611192d6313:39125 2024-11-15T22:38:16,390 INFO [RS:0;e611192d6313:39125 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T22:38:16,390 INFO [RS:0;e611192d6313:39125 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T22:38:16,390 DEBUG [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-15T22:38:16,390 INFO [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer(2659): reportForDuty to master=e611192d6313,34355,1731710295755 with port=39125, startcode=1731710295927 2024-11-15T22:38:16,391 DEBUG [RS:0;e611192d6313:39125 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T22:38:16,392 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33755, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T22:38:16,393 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34355 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e611192d6313,39125,1731710295927 2024-11-15T22:38:16,393 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34355 {}] master.ServerManager(517): Registering regionserver=e611192d6313,39125,1731710295927 2024-11-15T22:38:16,395 DEBUG [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2 2024-11-15T22:38:16,395 DEBUG [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41597 2024-11-15T22:38:16,395 DEBUG [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T22:38:16,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T22:38:16,404 DEBUG [RS:0;e611192d6313:39125 {}] zookeeper.ZKUtil(111): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e611192d6313,39125,1731710295927 2024-11-15T22:38:16,404 WARN [RS:0;e611192d6313:39125 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-15T22:38:16,405 INFO [RS:0;e611192d6313:39125 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:38:16,405 DEBUG [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927 2024-11-15T22:38:16,405 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e611192d6313,39125,1731710295927] 2024-11-15T22:38:16,408 INFO [RS:0;e611192d6313:39125 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T22:38:16,410 INFO [RS:0;e611192d6313:39125 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T22:38:16,410 INFO [RS:0;e611192d6313:39125 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T22:38:16,410 INFO [RS:0;e611192d6313:39125 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:16,410 INFO [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T22:38:16,411 INFO [RS:0;e611192d6313:39125 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T22:38:16,411 INFO [RS:0;e611192d6313:39125 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:16,411 DEBUG [RS:0;e611192d6313:39125 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:16,411 DEBUG [RS:0;e611192d6313:39125 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:16,411 DEBUG [RS:0;e611192d6313:39125 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:16,411 DEBUG [RS:0;e611192d6313:39125 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:16,411 DEBUG [RS:0;e611192d6313:39125 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:16,411 DEBUG [RS:0;e611192d6313:39125 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e611192d6313:0, corePoolSize=2, maxPoolSize=2 2024-11-15T22:38:16,411 DEBUG [RS:0;e611192d6313:39125 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:16,411 DEBUG [RS:0;e611192d6313:39125 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:16,412 DEBUG [RS:0;e611192d6313:39125 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e611192d6313:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T22:38:16,412 DEBUG [RS:0;e611192d6313:39125 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:16,412 DEBUG [RS:0;e611192d6313:39125 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:16,412 DEBUG [RS:0;e611192d6313:39125 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:16,412 DEBUG [RS:0;e611192d6313:39125 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e611192d6313:0, corePoolSize=3, maxPoolSize=3 2024-11-15T22:38:16,412 DEBUG [RS:0;e611192d6313:39125 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0, corePoolSize=3, maxPoolSize=3 2024-11-15T22:38:16,413 INFO [RS:0;e611192d6313:39125 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:16,413 INFO [RS:0;e611192d6313:39125 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:16,414 INFO [RS:0;e611192d6313:39125 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:16,414 INFO [RS:0;e611192d6313:39125 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:16,414 INFO [RS:0;e611192d6313:39125 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:16,414 INFO [RS:0;e611192d6313:39125 {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,39125,1731710295927-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T22:38:16,426 INFO [RS:0;e611192d6313:39125 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T22:38:16,427 INFO [RS:0;e611192d6313:39125 {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,39125,1731710295927-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:16,427 INFO [RS:0;e611192d6313:39125 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:16,427 INFO [RS:0;e611192d6313:39125 {}] regionserver.Replication(171): e611192d6313,39125,1731710295927 started 2024-11-15T22:38:16,438 INFO [RS:0;e611192d6313:39125 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T22:38:16,438 INFO [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer(1482): Serving as e611192d6313,39125,1731710295927, RpcServer on e611192d6313/172.17.0.3:39125, sessionid=0x10140a61a620001 2024-11-15T22:38:16,438 DEBUG [RS:0;e611192d6313:39125 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T22:38:16,438 DEBUG [RS:0;e611192d6313:39125 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e611192d6313,39125,1731710295927 2024-11-15T22:38:16,438 DEBUG [RS:0;e611192d6313:39125 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e611192d6313,39125,1731710295927' 2024-11-15T22:38:16,438 DEBUG [RS:0;e611192d6313:39125 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T22:38:16,439 DEBUG [RS:0;e611192d6313:39125 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T22:38:16,439 DEBUG [RS:0;e611192d6313:39125 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T22:38:16,440 DEBUG [RS:0;e611192d6313:39125 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T22:38:16,440 DEBUG [RS:0;e611192d6313:39125 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e611192d6313,39125,1731710295927 2024-11-15T22:38:16,440 DEBUG [RS:0;e611192d6313:39125 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e611192d6313,39125,1731710295927' 2024-11-15T22:38:16,440 DEBUG [RS:0;e611192d6313:39125 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T22:38:16,440 DEBUG [RS:0;e611192d6313:39125 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T22:38:16,440 DEBUG [RS:0;e611192d6313:39125 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T22:38:16,440 INFO [RS:0;e611192d6313:39125 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T22:38:16,440 INFO [RS:0;e611192d6313:39125 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T22:38:16,461 WARN [e611192d6313:34355 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-15T22:38:16,545 INFO [RS:0;e611192d6313:39125 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C39125%2C1731710295927, suffix=, logDir=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927, archiveDir=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/oldWALs, maxLogs=32 2024-11-15T22:38:16,548 INFO [RS:0;e611192d6313:39125 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C39125%2C1731710295927.1731710296547 2024-11-15T22:38:16,557 INFO [RS:0;e611192d6313:39125 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710296547 2024-11-15T22:38:16,558 DEBUG [RS:0;e611192d6313:39125 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33675:33675),(127.0.0.1/127.0.0.1:45869:45869)] 2024-11-15T22:38:16,712 DEBUG [e611192d6313:34355 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T22:38:16,713 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e611192d6313,39125,1731710295927 2024-11-15T22:38:16,715 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e611192d6313,39125,1731710295927, state=OPENING 2024-11-15T22:38:16,762 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T22:38:16,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:16,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:16,773 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T22:38:16,774 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:38:16,774 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:38:16,774 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e611192d6313,39125,1731710295927}] 2024-11-15T22:38:16,930 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T22:38:16,934 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51493, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T22:38:16,942 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T22:38:16,942 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:38:16,945 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C39125%2C1731710295927.meta, suffix=.meta, logDir=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927, archiveDir=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/oldWALs, maxLogs=32 2024-11-15T22:38:16,946 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C39125%2C1731710295927.meta.1731710296946.meta 2024-11-15T22:38:16,954 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.meta.1731710296946.meta 2024-11-15T22:38:16,955 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45869:45869),(127.0.0.1/127.0.0.1:33675:33675)] 2024-11-15T22:38:16,956 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:38:16,956 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T22:38:16,957 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T22:38:16,957 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-15T22:38:16,957 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T22:38:16,957 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:38:16,957 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T22:38:16,957 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T22:38:16,960 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T22:38:16,961 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T22:38:16,961 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:16,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:16,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T22:38:16,963 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T22:38:16,963 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:16,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:16,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T22:38:16,965 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T22:38:16,965 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:16,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:16,966 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T22:38:16,966 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T22:38:16,967 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:16,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-15T22:38:16,967 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T22:38:16,968 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740 2024-11-15T22:38:16,969 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740 2024-11-15T22:38:16,970 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T22:38:16,970 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T22:38:16,971 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T22:38:16,972 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T22:38:16,973 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=882767, jitterRate=0.12249711155891418}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T22:38:16,973 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T22:38:16,973 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731710296957Writing region info on filesystem at 1731710296957Initializing all the Stores at 1731710296958 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710296958Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710296960 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710296960Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710296960Cleaning up temporary data from old regions at 1731710296970 (+10 ms)Running coprocessor post-open hooks at 1731710296973 (+3 ms)Region opened successfully at 1731710296973 2024-11-15T22:38:16,974 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731710296929 2024-11-15T22:38:16,976 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T22:38:16,976 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T22:38:16,977 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e611192d6313,39125,1731710295927 2024-11-15T22:38:16,978 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e611192d6313,39125,1731710295927, state=OPEN 2024-11-15T22:38:17,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T22:38:17,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T22:38:17,021 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e611192d6313,39125,1731710295927 2024-11-15T22:38:17,021 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:38:17,021 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:38:17,025 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T22:38:17,025 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e611192d6313,39125,1731710295927 in 247 msec 2024-11-15T22:38:17,028 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T22:38:17,028 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 716 msec 2024-11-15T22:38:17,029 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:38:17,029 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T22:38:17,030 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T22:38:17,030 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e611192d6313,39125,1731710295927, seqNum=-1] 2024-11-15T22:38:17,031 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T22:38:17,032 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:32835, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T22:38:17,038 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 765 msec 2024-11-15T22:38:17,038 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731710297038, completionTime=-1 2024-11-15T22:38:17,038 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T22:38:17,038 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T22:38:17,040 INFO [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T22:38:17,040 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731710357040 2024-11-15T22:38:17,040 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731710417040 2024-11-15T22:38:17,040 INFO [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-15T22:38:17,040 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34355,1731710295755-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:17,040 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34355,1731710295755-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:17,041 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34355,1731710295755-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:17,041 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e611192d6313:34355, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T22:38:17,041 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:17,041 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:17,043 DEBUG [master/e611192d6313:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T22:38:17,044 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.050sec 2024-11-15T22:38:17,044 INFO [master/e611192d6313:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T22:38:17,044 INFO [master/e611192d6313:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T22:38:17,044 INFO [master/e611192d6313:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T22:38:17,044 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T22:38:17,045 INFO [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T22:38:17,045 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34355,1731710295755-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T22:38:17,045 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34355,1731710295755-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T22:38:17,047 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@443f9e40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:38:17,047 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e611192d6313,34355,-1 for getting cluster id 2024-11-15T22:38:17,047 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T22:38:17,047 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T22:38:17,048 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T22:38:17,048 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34355,1731710295755-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T22:38:17,049 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '704e1a17-56a0-43d1-a7e8-8ba04eeddcad' 2024-11-15T22:38:17,049 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T22:38:17,049 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "704e1a17-56a0-43d1-a7e8-8ba04eeddcad" 2024-11-15T22:38:17,050 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76b0cb2e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:38:17,050 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e611192d6313,34355,-1] 2024-11-15T22:38:17,050 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T22:38:17,050 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:38:17,052 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56398, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T22:38:17,052 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b944a8d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:38:17,053 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T22:38:17,054 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e611192d6313,39125,1731710295927, seqNum=-1] 2024-11-15T22:38:17,054 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T22:38:17,056 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50396, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T22:38:17,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e611192d6313,34355,1731710295755 2024-11-15T22:38:17,058 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:38:17,060 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T22:38:17,060 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-15T22:38:17,060 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-15T22:38:17,060 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T22:38:17,061 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched 
master address is e611192d6313,34355,1731710295755 2024-11-15T22:38:17,061 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@46fbb398 2024-11-15T22:38:17,062 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T22:38:17,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:17,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:17,063 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56404, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T22:38:17,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34355 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T22:38:17,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34355 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-15T22:38:17,064 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34355 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T22:38:17,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34355 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-15T22:38:17,067 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T22:38:17,067 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:17,067 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34355 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-15T22:38:17,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34355 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T22:38:17,069 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T22:38:17,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41555 is added to blk_1073741835_1011 (size=395) 2024-11-15T22:38:17,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33199 is added to blk_1073741835_1011 (size=395) 2024-11-15T22:38:17,077 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d5ad11521716b1271ae4fa89e5bb0efe, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2 2024-11-15T22:38:17,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33199 is added to blk_1073741836_1012 (size=78) 2024-11-15T22:38:17,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41555 is added to blk_1073741836_1012 (size=78) 2024-11-15T22:38:17,084 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:38:17,084 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing d5ad11521716b1271ae4fa89e5bb0efe, disabling compactions & flushes 2024-11-15T22:38:17,084 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe. 2024-11-15T22:38:17,084 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe. 2024-11-15T22:38:17,084 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe. after waiting 0 ms 2024-11-15T22:38:17,084 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe. 2024-11-15T22:38:17,084 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe. 2024-11-15T22:38:17,085 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for d5ad11521716b1271ae4fa89e5bb0efe: Waiting for close lock at 1731710297084Disabling compacts and flushes for region at 1731710297084Disabling writes for close at 1731710297084Writing region close event to WAL at 1731710297084Closed at 1731710297084 2024-11-15T22:38:17,086 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T22:38:17,086 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731710297086"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731710297086"}]},"ts":"1731710297086"} 2024-11-15T22:38:17,089 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-15T22:38:17,090 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T22:38:17,090 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731710297090"}]},"ts":"1731710297090"} 2024-11-15T22:38:17,092 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-15T22:38:17,092 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d5ad11521716b1271ae4fa89e5bb0efe, ASSIGN}] 2024-11-15T22:38:17,094 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d5ad11521716b1271ae4fa89e5bb0efe, ASSIGN 2024-11-15T22:38:17,095 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d5ad11521716b1271ae4fa89e5bb0efe, ASSIGN; state=OFFLINE, location=e611192d6313,39125,1731710295927; forceNewPlan=false, retain=false 2024-11-15T22:38:17,246 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d5ad11521716b1271ae4fa89e5bb0efe, regionState=OPENING, regionLocation=e611192d6313,39125,1731710295927 2024-11-15T22:38:17,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d5ad11521716b1271ae4fa89e5bb0efe, ASSIGN because future has completed 2024-11-15T22:38:17,254 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d5ad11521716b1271ae4fa89e5bb0efe, server=e611192d6313,39125,1731710295927}] 2024-11-15T22:38:17,417 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe. 
2024-11-15T22:38:17,417 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d5ad11521716b1271ae4fa89e5bb0efe, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe.', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:38:17,418 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart d5ad11521716b1271ae4fa89e5bb0efe 2024-11-15T22:38:17,418 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:38:17,418 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d5ad11521716b1271ae4fa89e5bb0efe 2024-11-15T22:38:17,418 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d5ad11521716b1271ae4fa89e5bb0efe 2024-11-15T22:38:17,421 INFO [StoreOpener-d5ad11521716b1271ae4fa89e5bb0efe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d5ad11521716b1271ae4fa89e5bb0efe 2024-11-15T22:38:17,423 INFO [StoreOpener-d5ad11521716b1271ae4fa89e5bb0efe-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d5ad11521716b1271ae4fa89e5bb0efe columnFamilyName info 2024-11-15T22:38:17,423 DEBUG [StoreOpener-d5ad11521716b1271ae4fa89e5bb0efe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:17,424 INFO [StoreOpener-d5ad11521716b1271ae4fa89e5bb0efe-1 {}] regionserver.HStore(327): Store=d5ad11521716b1271ae4fa89e5bb0efe/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:38:17,424 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d5ad11521716b1271ae4fa89e5bb0efe 2024-11-15T22:38:17,425 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/default/TestLogRolling-testLogRollOnPipelineRestart/d5ad11521716b1271ae4fa89e5bb0efe 2024-11-15T22:38:17,425 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/default/TestLogRolling-testLogRollOnPipelineRestart/d5ad11521716b1271ae4fa89e5bb0efe 2024-11-15T22:38:17,426 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d5ad11521716b1271ae4fa89e5bb0efe 2024-11-15T22:38:17,426 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d5ad11521716b1271ae4fa89e5bb0efe 2024-11-15T22:38:17,427 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d5ad11521716b1271ae4fa89e5bb0efe 2024-11-15T22:38:17,429 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/default/TestLogRolling-testLogRollOnPipelineRestart/d5ad11521716b1271ae4fa89e5bb0efe/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:38:17,430 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d5ad11521716b1271ae4fa89e5bb0efe; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=829306, jitterRate=0.054517313838005066}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T22:38:17,430 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d5ad11521716b1271ae4fa89e5bb0efe 2024-11-15T22:38:17,430 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d5ad11521716b1271ae4fa89e5bb0efe: Running coprocessor pre-open hook at 1731710297418Writing region info on filesystem at 1731710297418Initializing all the Stores at 1731710297420 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710297420Cleaning up temporary data from old regions at 1731710297426 (+6 ms)Running coprocessor post-open hooks at 1731710297430 (+4 ms)Region opened successfully at 1731710297430 2024-11-15T22:38:17,432 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe., pid=6, masterSystemTime=1731710297408 2024-11-15T22:38:17,434 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe. 2024-11-15T22:38:17,434 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe. 2024-11-15T22:38:17,435 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d5ad11521716b1271ae4fa89e5bb0efe, regionState=OPEN, openSeqNum=2, regionLocation=e611192d6313,39125,1731710295927 2024-11-15T22:38:17,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d5ad11521716b1271ae4fa89e5bb0efe, server=e611192d6313,39125,1731710295927 because future has completed 2024-11-15T22:38:17,441 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T22:38:17,441 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d5ad11521716b1271ae4fa89e5bb0efe, server=e611192d6313,39125,1731710295927 in 186 msec 2024-11-15T22:38:17,444 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T22:38:17,444 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d5ad11521716b1271ae4fa89e5bb0efe, ASSIGN in 349 msec 2024-11-15T22:38:17,445 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T22:38:17,445 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731710297445"}]},"ts":"1731710297445"} 2024-11-15T22:38:17,447 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-15T22:38:17,448 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T22:38:17,450 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 384 msec 2024-11-15T22:38:18,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:18,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:18,402 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T22:38:18,402 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T22:38:18,404 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-15T22:38:18,404 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-15T22:38:18,406 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T22:38:18,406 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-15T22:38:19,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:19,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:20,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:20,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:21,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:21,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:21,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:21,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:21,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:21,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:21,981 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:21,981 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:21,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:21,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:21,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:21,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:22,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:22,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:22,491 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T22:38:22,517 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:22,517 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:22,517 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:22,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:22,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:22,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:22,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:22,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:22,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:22,525 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:22,529 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T22:38:22,530 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-15T22:38:23,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:23,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:24,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:24,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:25,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:25,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:26,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:26,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:27,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:27,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:38:27,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34355 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T22:38:27,161 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-15T22:38:27,161 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-15T22:38:27,167 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-15T22:38:27,167 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe. 2024-11-15T22:38:27,172 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe., hostname=e611192d6313,39125,1731710295927, seqNum=2] 2024-11-15T22:38:28,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:28,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:29,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:29,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:29,176 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710296547 2024-11-15T22:38:29,178 WARN [ResponseProcessor for block BP-1285903154-172.17.0.3-1731710293246:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1285903154-172.17.0.3-1731710293246:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:29,178 WARN [ResponseProcessor for block BP-1285903154-172.17.0.3-1731710293246:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1285903154-172.17.0.3-1731710293246:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1285903154-172.17.0.3-1731710293246:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:41555,DS-3d6e597f-4928-42c9-ac0c-688bac1495bc,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:29,178 WARN [ResponseProcessor for block BP-1285903154-172.17.0.3-1731710293246:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1285903154-172.17.0.3-1731710293246:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:29,179 WARN [DataStreamer for file /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/WALs/e611192d6313,34355,1731710295755/e611192d6313%2C34355%2C1731710295755.1731710296062 block BP-1285903154-172.17.0.3-1731710293246:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1285903154-172.17.0.3-1731710293246:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41555,DS-3d6e597f-4928-42c9-ac0c-688bac1495bc,DISK], DatanodeInfoWithStorage[127.0.0.1:33199,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41555,DS-3d6e597f-4928-42c9-ac0c-688bac1495bc,DISK]) is bad. 
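Editor's note on the repeated "Failed invocation ... java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" warnings above: the stack traces show RecoverLeaseFSUtils calling DistributedFileSystem.isFileClosed through java.lang.reflect.Method.invoke, so the checked IOException surfaces wrapped and the real reason only appears under "Caused by". The following is a minimal, self-contained sketch of that wrapping; FakeFs and the path are made-up stand-ins, not the real HDFS client.

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveCallDemo {
        // Hypothetical stand-in for DistributedFileSystem; only the failure mode matters here.
        public static class FakeFs {
            public boolean isFileClosed(String path) throws IOException {
                throw new IOException("Filesystem closed");
            }
        }

        public static void main(String[] args) throws Exception {
            Method m = FakeFs.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(new FakeFs(), "/some/wal/file"); // illustrative path only
            } catch (InvocationTargetException e) {
                // The reflective layer reports the wrapper ("InvocationTargetException: null");
                // the underlying IOException is in getCause(), matching the "Caused by" lines above.
                System.out.println("cause: " + e.getCause());
            }
        }
    }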
2024-11-15T22:38:29,179 WARN [DataStreamer for file /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.meta.1731710296946.meta block BP-1285903154-172.17.0.3-1731710293246:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1285903154-172.17.0.3-1731710293246:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33199,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK], DatanodeInfoWithStorage[127.0.0.1:41555,DS-3d6e597f-4928-42c9-ac0c-688bac1495bc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41555,DS-3d6e597f-4928-42c9-ac0c-688bac1495bc,DISK]) is bad. 2024-11-15T22:38:29,179 WARN [DataStreamer for file /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710296547 block BP-1285903154-172.17.0.3-1731710293246:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1285903154-172.17.0.3-1731710293246:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41555,DS-3d6e597f-4928-42c9-ac0c-688bac1495bc,DISK], DatanodeInfoWithStorage[127.0.0.1:33199,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41555,DS-3d6e597f-4928-42c9-ac0c-688bac1495bc,DISK]) is bad. 2024-11-15T22:38:29,179 WARN [PacketResponder: BP-1285903154-172.17.0.3-1731710293246:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41555] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T22:38:29,180 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1740476448_22 at /127.0.0.1:56132 [Receiving block BP-1285903154-172.17.0.3-1731710293246:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41555:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56132 dst: /127.0.0.1:41555 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:29,181 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1926970825_22 at /127.0.0.1:46976 [Receiving block BP-1285903154-172.17.0.3-1731710293246:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33199:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46976 dst: /127.0.0.1:33199 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:29,181 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1926970825_22 at /127.0.0.1:56178 [Receiving block BP-1285903154-172.17.0.3-1731710293246:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41555:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56178 dst: /127.0.0.1:41555 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T22:38:29,181 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1926970825_22 at /127.0.0.1:46972 [Receiving block BP-1285903154-172.17.0.3-1731710293246:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33199:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46972 dst: /127.0.0.1:33199 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:29,181 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1740476448_22 at /127.0.0.1:46938 [Receiving block BP-1285903154-172.17.0.3-1731710293246:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33199:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46938 dst: /127.0.0.1:33199 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T22:38:29,182 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1926970825_22 at /127.0.0.1:56190 [Receiving block BP-1285903154-172.17.0.3-1731710293246:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41555:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56190 dst: /127.0.0.1:41555 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
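Editor's note: the burst of DataStreamer "Error Recovery ... datanode ... is bad" and DataXceiver WRITE_BLOCK errors above, followed by the datanode web context being stopped and started again below, is consistent with the test (TestLogRolling#testLogRollOnPipelineRestart) bouncing a datanode while WAL blocks are open. A rough sketch of how a test can induce that with Hadoop's MiniDFSCluster test utility follows; treat the exact method names and signatures as assumptions rather than the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class PipelineRestartSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Two datanodes, mirroring the two-node pipelines logged above (assumed setup).
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
            try {
                // ... write to a file so its open block has a two-node pipeline ...

                // Stopping a datanode while the block is open triggers pipeline error recovery
                // of the kind logged above ("datanode 0 ... is bad", DataXceiver EOF errors).
                MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);

                // Restarting it corresponds to the "Stopped ... / Started ..." jetty messages below.
                cluster.restartDataNode(dn);
                cluster.waitActive();
            } finally {
                cluster.shutdown();
            }
        }
    }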
2024-11-15T22:38:29,298 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ad6192b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:38:29,299 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@644dfd01{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:38:29,299 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:38:29,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ccd5f6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:38:29,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@126bd190{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.log.dir/,STOPPED} 2024-11-15T22:38:29,303 WARN [BP-1285903154-172.17.0.3-1731710293246 heartbeating to localhost/127.0.0.1:41597 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:38:29,303 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T22:38:29,303 WARN [BP-1285903154-172.17.0.3-1731710293246 heartbeating to localhost/127.0.0.1:41597 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1285903154-172.17.0.3-1731710293246 (Datanode Uuid 1e8bc306-eb2e-4f91-934b-c931e98c67bd) service to localhost/127.0.0.1:41597 2024-11-15T22:38:29,303 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:38:29,303 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/data/data3/current/BP-1285903154-172.17.0.3-1731710293246 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:38:29,303 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/data/data4/current/BP-1285903154-172.17.0.3-1731710293246 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:38:29,304 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:38:29,309 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:38:29,313 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:38:29,313 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:38:29,313 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:38:29,313 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T22:38:29,314 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3146549d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:38:29,314 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6baabd83{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:38:29,407 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@21404da7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/java.io.tmpdir/jetty-localhost-40521-hadoop-hdfs-3_4_1-tests_jar-_-any-11768320954847782209/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:38:29,407 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6e8914c8{HTTP/1.1, (http/1.1)}{localhost:40521} 2024-11-15T22:38:29,407 INFO [Time-limited test {}] server.Server(415): Started @167565ms 2024-11-15T22:38:29,408 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:38:29,424 WARN [ResponseProcessor for block BP-1285903154-172.17.0.3-1731710293246:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1285903154-172.17.0.3-1731710293246:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:29,424 WARN [ResponseProcessor for block BP-1285903154-172.17.0.3-1731710293246:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1285903154-172.17.0.3-1731710293246:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:29,424 WARN [ResponseProcessor for block BP-1285903154-172.17.0.3-1731710293246:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1285903154-172.17.0.3-1731710293246:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:29,425 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1926970825_22 at /127.0.0.1:57508 [Receiving block BP-1285903154-172.17.0.3-1731710293246:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33199:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57508 dst: /127.0.0.1:33199 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T22:38:29,425 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1926970825_22 at /127.0.0.1:57506 [Receiving block BP-1285903154-172.17.0.3-1731710293246:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33199:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57506 dst: /127.0.0.1:33199 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:29,425 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1740476448_22 at /127.0.0.1:57498 [Receiving block BP-1285903154-172.17.0.3-1731710293246:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33199:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57498 dst: /127.0.0.1:33199 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:29,510 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@37b5496d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:38:29,511 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@8825f29{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:38:29,511 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:38:29,511 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a75563d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:38:29,511 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7992aa88{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.log.dir/,STOPPED} 2024-11-15T22:38:29,514 WARN [BP-1285903154-172.17.0.3-1731710293246 heartbeating to localhost/127.0.0.1:41597 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:38:29,514 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T22:38:29,514 WARN [BP-1285903154-172.17.0.3-1731710293246 heartbeating to localhost/127.0.0.1:41597 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1285903154-172.17.0.3-1731710293246 (Datanode Uuid 6f378189-230f-48c9-8213-c8e5fdaa10d0) service to localhost/127.0.0.1:41597 2024-11-15T22:38:29,514 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:38:29,515 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/data/data1/current/BP-1285903154-172.17.0.3-1731710293246 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:38:29,516 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/data/data2/current/BP-1285903154-172.17.0.3-1731710293246 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:38:29,516 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:38:29,525 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:38:29,530 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:38:29,531 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:38:29,531 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:38:29,532 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T22:38:29,532 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ec1c56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:38:29,532 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29b43d59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:38:29,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58c81822{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/java.io.tmpdir/jetty-localhost-43995-hadoop-hdfs-3_4_1-tests_jar-_-any-8020214875817138665/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:38:29,629 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@730e0fff{HTTP/1.1, 
(http/1.1)}{localhost:43995} 2024-11-15T22:38:29,629 INFO [Time-limited test {}] server.Server(415): Started @167787ms 2024-11-15T22:38:29,630 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:38:30,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:30,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:30,139 WARN [Thread-1333 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T22:38:30,141 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x13a3c25d6c156174 with lease ID 0xd3009b2580b17149: from storage DS-3d6e597f-4928-42c9-ac0c-688bac1495bc node DatanodeRegistration(127.0.0.1:43151, datanodeUuid=1e8bc306-eb2e-4f91-934b-c931e98c67bd, infoPort=37343, infoSecurePort=0, ipcPort=44207, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:38:30,142 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x13a3c25d6c156174 with lease ID 0xd3009b2580b17149: from storage DS-44638cc2-8414-4115-b5fe-7f982864d593 node DatanodeRegistration(127.0.0.1:43151, datanodeUuid=1e8bc306-eb2e-4f91-934b-c931e98c67bd, infoPort=37343, infoSecurePort=0, ipcPort=44207, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:38:30,289 WARN [Thread-1353 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T22:38:30,291 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xde1122c01e0b83a2 with lease ID 0xd3009b2580b1714a: from storage DS-9e21222b-34c7-4b05-98c9-464b2da7eff3 node DatanodeRegistration(127.0.0.1:41361, datanodeUuid=6f378189-230f-48c9-8213-c8e5fdaa10d0, infoPort=43893, infoSecurePort=0, ipcPort=42889, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T22:38:30,291 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xde1122c01e0b83a2 with lease ID 0xd3009b2580b1714a: from storage DS-1c1b8f57-536b-41f0-8ecb-f347d1964b55 node DatanodeRegistration(127.0.0.1:41361, datanodeUuid=6f378189-230f-48c9-8213-c8e5fdaa10d0, infoPort=43893, infoSecurePort=0, ipcPort=42889, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:38:30,650 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-15T22:38:30,656 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-15T22:38:30,657 ERROR [FSHLog-0-hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2-prefix:e611192d6313,39125,1731710295927 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33199,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:30,658 WARN [FSHLog-0-hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2-prefix:e611192d6313,39125,1731710295927 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33199,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:38:30,658 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e611192d6313%2C39125%2C1731710295927:(num 1731710296547) roll requested 2024-11-15T22:38:30,658 INFO [regionserver/e611192d6313:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C39125%2C1731710295927.1731710310658 2024-11-15T22:38:30,665 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710296547 newFile=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 2024-11-15T22:38:30,665 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:30,665 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:30,665 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:30,665 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:30,665 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:30,666 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710296547 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 2024-11-15T22:38:30,668 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33199,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:30,668 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33199,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:38:30,668 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710296547 2024-11-15T22:38:30,669 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37343:37343),(127.0.0.1/127.0.0.1:43893:43893)] 2024-11-15T22:38:30,669 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710296547 is not closed yet, will try archiving it next time 2024-11-15T22:38:30,669 WARN [IPC Server handler 2 on default port 41597 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710296547 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-15T22:38:30,669 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710296547 after 1ms 2024-11-15T22:38:31,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:31,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:32,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:32,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:32,144 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-15T22:38:32,676 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-15T22:38:33,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:33,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:34,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:34,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:34,671 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710296547 after 4002ms 2024-11-15T22:38:34,681 WARN [ResponseProcessor for block BP-1285903154-172.17.0.3-1731710293246:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1285903154-172.17.0.3-1731710293246:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1285903154-172.17.0.3-1731710293246:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:41361,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:34,682 WARN [DataStreamer for file /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 block BP-1285903154-172.17.0.3-1731710293246:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1285903154-172.17.0.3-1731710293246:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43151,DS-3d6e597f-4928-42c9-ac0c-688bac1495bc,DISK], DatanodeInfoWithStorage[127.0.0.1:41361,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41361,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]) is bad. 2024-11-15T22:38:34,682 WARN [PacketResponder: BP-1285903154-172.17.0.3-1731710293246:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41361] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:34,683 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1926970825_22 at /127.0.0.1:42920 [Receiving block BP-1285903154-172.17.0.3-1731710293246:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43151:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42920 dst: /127.0.0.1:43151 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:34,683 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1926970825_22 at /127.0.0.1:47522 [Receiving block BP-1285903154-172.17.0.3-1731710293246:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41361:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47522 dst: /127.0.0.1:41361 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:34,717 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58c81822{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:38:34,718 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@730e0fff{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:38:34,718 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:38:34,719 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29b43d59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:38:34,719 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ec1c56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.log.dir/,STOPPED} 2024-11-15T22:38:34,721 WARN [BP-1285903154-172.17.0.3-1731710293246 heartbeating to localhost/127.0.0.1:41597 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:38:34,721 WARN [BP-1285903154-172.17.0.3-1731710293246 heartbeating to localhost/127.0.0.1:41597 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1285903154-172.17.0.3-1731710293246 (Datanode Uuid 6f378189-230f-48c9-8213-c8e5fdaa10d0) service to localhost/127.0.0.1:41597 2024-11-15T22:38:34,722 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/data/data1/current/BP-1285903154-172.17.0.3-1731710293246 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:38:34,722 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/data/data2/current/BP-1285903154-172.17.0.3-1731710293246 {}] 
fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:38:34,722 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T22:38:34,722 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:38:34,722 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:38:34,731 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:38:34,734 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:38:34,735 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:38:34,735 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:38:34,735 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T22:38:34,735 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bc7279c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:38:34,736 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3df3f65e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:38:34,828 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f03ce36{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/java.io.tmpdir/jetty-localhost-43579-hadoop-hdfs-3_4_1-tests_jar-_-any-8387903124444715750/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:38:34,829 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@cbccde7{HTTP/1.1, (http/1.1)}{localhost:43579} 2024-11-15T22:38:34,829 INFO [Time-limited test {}] server.Server(415): Started @172986ms 2024-11-15T22:38:34,830 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:38:34,844 WARN [ResponseProcessor for block BP-1285903154-172.17.0.3-1731710293246:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1285903154-172.17.0.3-1731710293246:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:34,845 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1926970825_22 at /127.0.0.1:47208 [Receiving block BP-1285903154-172.17.0.3-1731710293246:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43151:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47208 dst: /127.0.0.1:43151 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T22:38:34,849 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@21404da7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:38:34,849 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6e8914c8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:38:34,849 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:38:34,849 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6baabd83{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:38:34,850 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3146549d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.log.dir/,STOPPED} 2024-11-15T22:38:34,850 WARN [BP-1285903154-172.17.0.3-1731710293246 heartbeating to localhost/127.0.0.1:41597 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:38:34,850 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T22:38:34,850 WARN [BP-1285903154-172.17.0.3-1731710293246 heartbeating to localhost/127.0.0.1:41597 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1285903154-172.17.0.3-1731710293246 (Datanode Uuid 1e8bc306-eb2e-4f91-934b-c931e98c67bd) service to localhost/127.0.0.1:41597 2024-11-15T22:38:34,850 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:38:34,851 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/data/data3/current/BP-1285903154-172.17.0.3-1731710293246 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:38:34,851 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/data/data4/current/BP-1285903154-172.17.0.3-1731710293246 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:38:34,852 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:38:34,862 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:38:34,865 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:38:34,867 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:38:34,867 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:38:34,867 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T22:38:34,868 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72d2e6e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:38:34,868 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3de0aa3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:38:34,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ca27a78{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/java.io.tmpdir/jetty-localhost-41609-hadoop-hdfs-3_4_1-tests_jar-_-any-3189475114668887146/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:38:34,960 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@54131bab{HTTP/1.1, (http/1.1)}{localhost:41609} 2024-11-15T22:38:34,960 INFO [Time-limited test {}] server.Server(415): Started @173118ms 2024-11-15T22:38:34,962 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:38:35,089 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:35,089 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:35,477 WARN [Thread-1407 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T22:38:35,479 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4eb6fa006b310905 with lease ID 0xd3009b2580b1714b: from storage DS-9e21222b-34c7-4b05-98c9-464b2da7eff3 node DatanodeRegistration(127.0.0.1:32783, datanodeUuid=6f378189-230f-48c9-8213-c8e5fdaa10d0, infoPort=39213, infoSecurePort=0, ipcPort=38867, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:38:35,479 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4eb6fa006b310905 with lease ID 0xd3009b2580b1714b: from storage DS-1c1b8f57-536b-41f0-8ecb-f347d1964b55 node DatanodeRegistration(127.0.0.1:32783, datanodeUuid=6f378189-230f-48c9-8213-c8e5fdaa10d0, infoPort=39213, infoSecurePort=0, ipcPort=38867, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:38:35,646 WARN [Thread-1427 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T22:38:35,649 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x678845a64828a2af with lease ID 0xd3009b2580b1714c: from storage DS-3d6e597f-4928-42c9-ac0c-688bac1495bc node DatanodeRegistration(127.0.0.1:44217, datanodeUuid=1e8bc306-eb2e-4f91-934b-c931e98c67bd, infoPort=36353, infoSecurePort=0, ipcPort=36001, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:38:35,649 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x678845a64828a2af with lease ID 0xd3009b2580b1714c: from storage DS-44638cc2-8414-4115-b5fe-7f982864d593 node DatanodeRegistration(127.0.0.1:44217, datanodeUuid=1e8bc306-eb2e-4f91-934b-c931e98c67bd, infoPort=36353, infoSecurePort=0, ipcPort=36001, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:38:35,977 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-15T22:38:35,981 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-15T22:38:35,985 ERROR [FSHLog-0-hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2-prefix:e611192d6313,39125,1731710295927 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43151,DS-3d6e597f-4928-42c9-ac0c-688bac1495bc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
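The DirectoryScanner warnings above ("dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1") mean the configured throttle was rejected and the scanner ran unthrottled. A minimal, hypothetical sketch of setting that property to a value the scanner will accept (at most 1000 ms per second), using only the standard Hadoop Configuration API:

import org.apache.hadoop.conf.Configuration;

public class DirectoryScanThrottleSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Values above 1000 ms/sec are ignored and the scanner falls back to -1 (unthrottled),
        // which is exactly what the WARN lines above report; 1000 or less takes effect.
        conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 1000);
        System.out.println(conf.getInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", -1));
    }
}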
2024-11-15T22:38:35,985 WARN [FSHLog-0-hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2-prefix:e611192d6313,39125,1731710295927 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43151,DS-3d6e597f-4928-42c9-ac0c-688bac1495bc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:35,985 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e611192d6313%2C39125%2C1731710295927:(num 1731710310658) roll requested 2024-11-15T22:38:35,986 INFO [regionserver/e611192d6313:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C39125%2C1731710295927.1731710315985 2024-11-15T22:38:35,993 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 newFile=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710315985 2024-11-15T22:38:35,994 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:35,994 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:35,994 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:35,994 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:35,994 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:35,994 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710315985 2024-11-15T22:38:35,994 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43151,DS-3d6e597f-4928-42c9-ac0c-688bac1495bc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
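The "roll requested" / "Rolled WAL ... new WAL" lines above are the region server's log roller reacting to the failed append. For reference, a WAL roll can also be requested explicitly through the public Admin API; the sketch below is hypothetical for this test run (the server name values are copied from the log, the connection settings are assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask one region server to roll its WAL, producing "Rolled WAL ... new WAL ..."
            // entries like the ones above (host, port and start code taken from the log).
            ServerName rs = ServerName.valueOf("e611192d6313", 39125, 1731710295927L);
            admin.rollWALWriter(rs);
        }
    }
}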
2024-11-15T22:38:35,994 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43151,DS-3d6e597f-4928-42c9-ac0c-688bac1495bc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:35,995 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 2024-11-15T22:38:35,995 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39213:39213),(127.0.0.1/127.0.0.1:36353:36353)] 2024-11-15T22:38:35,995 WARN [IPC Server handler 1 on default port 41597 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-15T22:38:35,995 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 is not closed yet, will try archiving it next time 2024-11-15T22:38:35,995 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 after 0ms 2024-11-15T22:38:36,091 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:36,090 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:38:37,091 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:37,091 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:37,997 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C39125%2C1731710295927.1731710317997 2024-11-15T22:38:38,008 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710315985 newFile=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710317997 2024-11-15T22:38:38,008 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:38,008 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:38,009 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:38,009 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:38,009 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:38,009 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710315985 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710317997 2024-11-15T22:38:38,010 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39213:39213),(127.0.0.1/127.0.0.1:36353:36353)] 2024-11-15T22:38:38,010 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 is not closed yet, will try archiving it next time 2024-11-15T22:38:38,010 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710315985 is not closed yet, will try archiving it next time 2024-11-15T22:38:38,011 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for 
hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710296547 2024-11-15T22:38:38,011 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710296547 2024-11-15T22:38:38,012 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710296547 after 1ms 2024-11-15T22:38:38,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741838_1019 (size=1264) 2024-11-15T22:38:38,012 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710296547 2024-11-15T22:38:38,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741838_1019 (size=1264) 2024-11-15T22:38:38,014 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 is not closed yet, will try archiving it next time 2024-11-15T22:38:38,024 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731710297431/Put/vlen=218/seqid=0] 2024-11-15T22:38:38,025 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731710307174/Put/vlen=1045/seqid=0] 2024-11-15T22:38:38,025 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710296547 2024-11-15T22:38:38,025 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 2024-11-15T22:38:38,025 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 2024-11-15T22:38:38,025 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 after 0ms 2024-11-15T22:38:38,025 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 2024-11-15T22:38:38,029 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731710310657/Put/vlen=1045/seqid=0] 2024-11-15T22:38:38,029 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731710312678/Put/vlen=1045/seqid=0] 
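The "Recover lease on dfs file ... Recovered lease, attempt=N after Nms" messages above come from HBase's lease-recovery helper polling the NameNode until the old writer's lease is released. A minimal sketch of the underlying pattern, assuming an HDFS filesystem and using only DistributedFileSystem.recoverLease; this is not the actual RecoverLeaseFSUtils code, and the 4-second pause is simply taken from the gap visible in the log:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
    // Repeatedly ask the NameNode to recover the lease on a WAL file until it reports
    // success, mirroring the attempt=0 / attempt=1 progression in the log above.
    static void recoverLease(Configuration conf, Path wal) throws IOException, InterruptedException {
        FileSystem fs = wal.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
            return; // lease recovery only applies to HDFS
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        long start = System.currentTimeMillis();
        for (int attempt = 0; ; attempt++) {
            boolean recovered = dfs.recoverLease(wal);
            long elapsed = System.currentTimeMillis() - start;
            if (recovered) {
                System.out.println("Recovered lease, attempt=" + attempt + " after " + elapsed + "ms");
                return;
            }
            System.out.println("Failed to recover lease, attempt=" + attempt + " after " + elapsed + "ms");
            Thread.sleep(4000L); // wait before re-checking, roughly the ~4s gap seen in the log
        }
    }
}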
2024-11-15T22:38:38,029 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 2024-11-15T22:38:38,029 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710315985 2024-11-15T22:38:38,029 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710315985 2024-11-15T22:38:38,030 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710315985 after 1ms 2024-11-15T22:38:38,030 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710315985 2024-11-15T22:38:38,034 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731710315984/Put/vlen=1045/seqid=0] 2024-11-15T22:38:38,034 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710317997 2024-11-15T22:38:38,034 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710317997 2024-11-15T22:38:38,034 WARN [IPC Server handler 3 on default port 41597 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710317997 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-15T22:38:38,034 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710317997 after 0ms 2024-11-15T22:38:38,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:38,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:38,660 WARN [ResponseProcessor for block BP-1285903154-172.17.0.3-1731710293246:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1285903154-172.17.0.3-1731710293246:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:38,660 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1740476448_22 at /127.0.0.1:44260 [Receiving block BP-1285903154-172.17.0.3-1731710293246:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:32783:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44260 dst: /127.0.0.1:32783 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:32783 remote=/127.0.0.1:44260]. Total timeout mills is 60000, 59347 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:38,660 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1740476448_22 at /127.0.0.1:59276 [Receiving block BP-1285903154-172.17.0.3-1731710293246:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:44217:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59276 dst: /127.0.0.1:44217 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:38:38,660 WARN [DataStreamer for file /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710317997 block BP-1285903154-172.17.0.3-1731710293246:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1285903154-172.17.0.3-1731710293246:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32783,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK], DatanodeInfoWithStorage[127.0.0.1:44217,DS-3d6e597f-4928-42c9-ac0c-688bac1495bc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32783,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]) is bad. 
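The repeated "Failed invocation ... java.lang.reflect.InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warnings above show RecoverLeaseFSUtils probing isFileClosed through reflection and hitting an already-closed DFSClient. A minimal sketch of that reflective probe pattern, assuming a FileSystem handle; it is not the actual HBase implementation:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
    // Call FileSystem#isFileClosed reflectively so the caller also works against
    // FileSystem implementations that do not expose the method; the real failure
    // surfaces as the cause of an InvocationTargetException, as in the traces above.
    static boolean isFileClosed(FileSystem fs, Path path) {
        try {
            Method m = fs.getClass().getMethod("isFileClosed", Path.class);
            return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException e) {
            return false; // method not available on this FileSystem implementation
        } catch (InvocationTargetException e) {
            // e.getCause() is the underlying error, e.g. IOException: Filesystem closed
            return false;
        } catch (IllegalAccessException e) {
            return false;
        }
    }
}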
2024-11-15T22:38:38,661 WARN [DataStreamer for file /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710317997 block BP-1285903154-172.17.0.3-1731710293246:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1285903154-172.17.0.3-1731710293246:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
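In the DataStreamer exception above, the NameNode rejects updateBlockForPipeline because lease recovery has already moved blk_1073741839_1021 to UNDER_RECOVERY. The Close-WAL-Writer entries that follow show the other side of that race: RecoverLeaseFSUtils triggers lease recovery on the abandoned WAL and then repeatedly probes isFileClosed (via reflection) until the NameNode seals the file, eventually logging "Recovered lease, attempt=1 ... after 4003ms". The snippet below is a simplified sketch of that recover-then-poll pattern against DistributedFileSystem, assuming a caller-supplied path and timeout; it is not the actual HBase implementation.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Simplified sketch of the recover-lease pattern visible in the log:
// trigger lease recovery on the abandoned WAL, then poll isFileClosed()
// until the NameNode has finalized the last block. Path and timings are
// illustrative assumptions; the real logic lives in RecoverLeaseFSUtils.
public class RecoverWalLeaseSketch {
  public static boolean recoverLease(FileSystem fs, Path wal, long timeoutMs)
      throws Exception {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to do for filesystems without HDFS leases
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(wal); // attempt 0, often returns false
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000); // back off between probes
      // isFileClosed() reports whether an earlier recoverLease call completed
      if (dfs.isFileClosed(wal)) {
        recovered = true;
        break;
      }
      recovered = dfs.recoverLease(wal); // retry the recovery request
    }
    return recovered;
  }
}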
2024-11-15T22:38:38,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741839_1022 (size=85) 2024-11-15T22:38:39,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:39,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:39,998 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710310658 after 4003ms 2024-11-15T22:38:40,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:40,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:41,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:41,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:41,481 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-15T22:38:42,036 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710317997 after 4002ms 2024-11-15T22:38:42,036 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710317997 2024-11-15T22:38:42,045 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710317997 2024-11-15T22:38:42,046 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-15T22:38:42,047 ERROR [FSHLog-0-hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2-prefix:e611192d6313,39125,1731710295927.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33199,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:42,047 WARN [FSHLog-0-hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2-prefix:e611192d6313,39125,1731710295927.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33199,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:42,047 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e611192d6313%2C39125%2C1731710295927.meta:.meta(num 1731710296946) roll requested 2024-11-15T22:38:42,047 INFO [regionserver/e611192d6313:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C39125%2C1731710295927.meta.1731710322047.meta 2024-11-15T22:38:42,053 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:42,053 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:42,053 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:42,053 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:42,053 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:42,053 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.meta.1731710296946.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.meta.1731710322047.meta 2024-11-15T22:38:42,055 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33199,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:42,055 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33199,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:42,055 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.meta.1731710296946.meta 2024-11-15T22:38:42,056 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39213:39213),(127.0.0.1/127.0.0.1:36353:36353)] 2024-11-15T22:38:42,056 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.meta.1731710296946.meta is not closed yet, will try archiving it next time 2024-11-15T22:38:42,056 WARN [IPC Server handler 1 on default port 41597 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.meta.1731710296946.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1024 for block blk_1073741834_1015 2024-11-15T22:38:42,056 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.meta.1731710296946.meta after 1ms 2024-11-15T22:38:42,069 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/.tmp/info/d169f6e2645f49ec879ca49b04ee1352 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe./info:regioninfo/1731710297435/Put/seqid=0 2024-11-15T22:38:42,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741841_1025 (size=7125) 2024-11-15T22:38:42,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741841_1025 (size=7125) 2024-11-15T22:38:42,075 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/.tmp/info/d169f6e2645f49ec879ca49b04ee1352 2024-11-15T22:38:42,094 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/.tmp/ns/de761bd0bc0742f9997c154129c24aa6 is 43, key is default/ns:d/1731710297032/Put/seqid=0 2024-11-15T22:38:42,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:42,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:38:42,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741842_1026 (size=5153) 2024-11-15T22:38:42,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741842_1026 (size=5153) 2024-11-15T22:38:42,099 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/.tmp/ns/de761bd0bc0742f9997c154129c24aa6 2024-11-15T22:38:42,119 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/.tmp/table/41d6b509ae16445c872a2e492587f331 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731710297445/Put/seqid=0 2024-11-15T22:38:42,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741843_1027 (size=5438) 2024-11-15T22:38:42,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741843_1027 (size=5438) 2024-11-15T22:38:42,124 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/.tmp/table/41d6b509ae16445c872a2e492587f331 2024-11-15T22:38:42,130 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/.tmp/info/d169f6e2645f49ec879ca49b04ee1352 as hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/info/d169f6e2645f49ec879ca49b04ee1352 2024-11-15T22:38:42,135 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/info/d169f6e2645f49ec879ca49b04ee1352, entries=10, sequenceid=11, filesize=7.0 K 2024-11-15T22:38:42,135 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/.tmp/ns/de761bd0bc0742f9997c154129c24aa6 as hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/ns/de761bd0bc0742f9997c154129c24aa6 2024-11-15T22:38:42,141 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/ns/de761bd0bc0742f9997c154129c24aa6, entries=2, sequenceid=11, filesize=5.0 K 2024-11-15T22:38:42,142 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/.tmp/table/41d6b509ae16445c872a2e492587f331 as hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/table/41d6b509ae16445c872a2e492587f331 2024-11-15T22:38:42,147 INFO [Time-limited test {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/table/41d6b509ae16445c872a2e492587f331, entries=2, sequenceid=11, filesize=5.3 K 2024-11-15T22:38:42,149 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 102ms, sequenceid=11, compaction requested=false 2024-11-15T22:38:42,149 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-15T22:38:42,149 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d5ad11521716b1271ae4fa89e5bb0efe 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-15T22:38:42,149 ERROR [FSHLog-0-hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2-prefix:e611192d6313,39125,1731710295927 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1285903154-172.17.0.3-1731710293246:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:42,150 WARN [FSHLog-0-hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2-prefix:e611192d6313,39125,1731710295927 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1285903154-172.17.0.3-1731710293246:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:42,150 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e611192d6313%2C39125%2C1731710295927:(num 1731710317997) roll requested 2024-11-15T22:38:42,150 INFO [regionserver/e611192d6313:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C39125%2C1731710295927.1731710322150 2024-11-15T22:38:42,155 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710317997 newFile=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710322150 2024-11-15T22:38:42,156 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:42,156 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:42,156 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:42,156 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:42,156 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:42,156 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710317997 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710322150 2024-11-15T22:38:42,156 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1285903154-172.17.0.3-1731710293246:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:42,157 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1285903154-172.17.0.3-1731710293246:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:38:42,157 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710317997 2024-11-15T22:38:42,157 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710317997 after 0ms 2024-11-15T22:38:42,157 DEBUG [regionserver/e611192d6313:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39213:39213),(127.0.0.1/127.0.0.1:36353:36353)] 2024-11-15T22:38:42,158 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.1731710317997 to hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/oldWALs/e611192d6313%2C39125%2C1731710295927.1731710317997 2024-11-15T22:38:42,175 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/default/TestLogRolling-testLogRollOnPipelineRestart/d5ad11521716b1271ae4fa89e5bb0efe/.tmp/info/83d2e5754bd14ef5a2c7fc0f54c03e91 is 1080, key is row1002/info:/1731710307174/Put/seqid=0 2024-11-15T22:38:42,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741845_1029 (size=9270) 2024-11-15T22:38:42,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741845_1029 (size=9270) 2024-11-15T22:38:42,181 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/default/TestLogRolling-testLogRollOnPipelineRestart/d5ad11521716b1271ae4fa89e5bb0efe/.tmp/info/83d2e5754bd14ef5a2c7fc0f54c03e91 2024-11-15T22:38:42,186 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/default/TestLogRolling-testLogRollOnPipelineRestart/d5ad11521716b1271ae4fa89e5bb0efe/.tmp/info/83d2e5754bd14ef5a2c7fc0f54c03e91 as hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/default/TestLogRolling-testLogRollOnPipelineRestart/d5ad11521716b1271ae4fa89e5bb0efe/info/83d2e5754bd14ef5a2c7fc0f54c03e91 2024-11-15T22:38:42,191 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/default/TestLogRolling-testLogRollOnPipelineRestart/d5ad11521716b1271ae4fa89e5bb0efe/info/83d2e5754bd14ef5a2c7fc0f54c03e91, entries=4, sequenceid=8, filesize=9.1 K 2024-11-15T22:38:42,192 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for d5ad11521716b1271ae4fa89e5bb0efe in 43ms, sequenceid=8, compaction requested=false 2024-11-15T22:38:42,192 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
d5ad11521716b1271ae4fa89e5bb0efe: 2024-11-15T22:38:42,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T22:38:42,198 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T22:38:42,198 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:38:42,198 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:38:42,198 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-11-15T22:38:42,198 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T22:38:42,198 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T22:38:42,198 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=207666871, stopped=false 2024-11-15T22:38:42,198 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e611192d6313,34355,1731710295755 2024-11-15T22:38:42,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T22:38:42,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T22:38:42,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:42,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:42,267 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T22:38:42,268 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T22:38:42,268 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:38:42,269 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Set 
watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:38:42,269 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:38:42,269 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:38:42,269 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e611192d6313,39125,1731710295927' ***** 2024-11-15T22:38:42,270 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T22:38:42,270 INFO [RS:0;e611192d6313:39125 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T22:38:42,271 INFO [RS:0;e611192d6313:39125 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T22:38:42,271 INFO [RS:0;e611192d6313:39125 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T22:38:42,271 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T22:38:42,271 INFO [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer(3091): Received CLOSE for d5ad11521716b1271ae4fa89e5bb0efe 2024-11-15T22:38:42,271 INFO [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer(959): stopping server e611192d6313,39125,1731710295927 2024-11-15T22:38:42,271 INFO [RS:0;e611192d6313:39125 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T22:38:42,272 INFO [RS:0;e611192d6313:39125 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e611192d6313:39125. 2024-11-15T22:38:42,272 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d5ad11521716b1271ae4fa89e5bb0efe, disabling compactions & flushes 2024-11-15T22:38:42,272 DEBUG [RS:0;e611192d6313:39125 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:38:42,272 DEBUG [RS:0;e611192d6313:39125 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:38:42,272 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe. 2024-11-15T22:38:42,272 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe. 2024-11-15T22:38:42,272 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe. after waiting 0 ms 2024-11-15T22:38:42,272 INFO [RS:0;e611192d6313:39125 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T22:38:42,272 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe. 2024-11-15T22:38:42,272 INFO [RS:0;e611192d6313:39125 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T22:38:42,272 INFO [RS:0;e611192d6313:39125 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T22:38:42,272 INFO [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T22:38:42,273 INFO [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T22:38:42,273 DEBUG [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, d5ad11521716b1271ae4fa89e5bb0efe=TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe.} 2024-11-15T22:38:42,273 DEBUG [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d5ad11521716b1271ae4fa89e5bb0efe 2024-11-15T22:38:42,273 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T22:38:42,273 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T22:38:42,273 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T22:38:42,273 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T22:38:42,273 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T22:38:42,277 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/default/TestLogRolling-testLogRollOnPipelineRestart/d5ad11521716b1271ae4fa89e5bb0efe/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-15T22:38:42,278 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe. 2024-11-15T22:38:42,278 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d5ad11521716b1271ae4fa89e5bb0efe: Waiting for close lock at 1731710322271Running coprocessor pre-close hooks at 1731710322272 (+1 ms)Disabling compacts and flushes for region at 1731710322272Disabling writes for close at 1731710322272Writing region close event to WAL at 1731710322273 (+1 ms)Running coprocessor post-close hooks at 1731710322278 (+5 ms)Closed at 1731710322278 2024-11-15T22:38:42,278 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-15T22:38:42,278 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731710297064.d5ad11521716b1271ae4fa89e5bb0efe. 2024-11-15T22:38:42,278 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T22:38:42,278 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T22:38:42,279 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731710322273Running coprocessor pre-close hooks at 1731710322273Disabling compacts and flushes for region at 1731710322273Disabling writes for close at 1731710322273Writing region close event to WAL at 1731710322275 (+2 ms)Running coprocessor post-close hooks at 1731710322278 (+3 ms)Closed at 1731710322278 2024-11-15T22:38:42,279 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T22:38:42,415 INFO [regionserver/e611192d6313:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T22:38:42,415 INFO [regionserver/e611192d6313:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T22:38:42,416 INFO [regionserver/e611192d6313:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T22:38:42,473 INFO [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer(976): stopping server e611192d6313,39125,1731710295927; all regions closed. 
2024-11-15T22:38:42,474 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:42,475 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:42,475 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:42,476 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:42,476 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:42,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741840_1023 (size=825) 2024-11-15T22:38:42,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741840_1023 (size=825) 2024-11-15T22:38:43,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:43,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:44,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:44,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:44,651 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-15T22:38:45,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:45,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:45,732 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-15T22:38:46,058 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.meta.1731710296946.meta after 4002ms 2024-11-15T22:38:46,059 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/WALs/e611192d6313,39125,1731710295927/e611192d6313%2C39125%2C1731710295927.meta.1731710296946.meta to hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/oldWALs/e611192d6313%2C39125%2C1731710295927.meta.1731710296946.meta 2024-11-15T22:38:46,067 DEBUG [RS:0;e611192d6313:39125 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/oldWALs 2024-11-15T22:38:46,067 INFO [RS:0;e611192d6313:39125 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e611192d6313%2C39125%2C1731710295927.meta:.meta(num 1731710322047) 2024-11-15T22:38:46,068 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:46,068 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:46,069 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:46,069 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:46,069 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:46,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741844_1028 (size=1162) 2024-11-15T22:38:46,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741844_1028 (size=1162) 2024-11-15T22:38:46,077 DEBUG [RS:0;e611192d6313:39125 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/oldWALs 2024-11-15T22:38:46,077 INFO [RS:0;e611192d6313:39125 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e611192d6313%2C39125%2C1731710295927:(num 1731710322150) 2024-11-15T22:38:46,077 DEBUG [RS:0;e611192d6313:39125 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:38:46,077 INFO [RS:0;e611192d6313:39125 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T22:38:46,077 INFO [RS:0;e611192d6313:39125 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T22:38:46,077 INFO [RS:0;e611192d6313:39125 {}] hbase.ChoreService(370): Chore service for: regionserver/e611192d6313:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T22:38:46,078 INFO [RS:0;e611192d6313:39125 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T22:38:46,078 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T22:38:46,078 INFO [RS:0;e611192d6313:39125 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39125 2024-11-15T22:38:46,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:46,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:46,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e611192d6313,39125,1731710295927 2024-11-15T22:38:46,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T22:38:46,150 INFO [RS:0;e611192d6313:39125 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T22:38:46,161 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e611192d6313,39125,1731710295927] 2024-11-15T22:38:46,171 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e611192d6313,39125,1731710295927 already deleted, retry=false 2024-11-15T22:38:46,171 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e611192d6313,39125,1731710295927 expired; onlineServers=0 2024-11-15T22:38:46,171 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e611192d6313,34355,1731710295755' ***** 2024-11-15T22:38:46,172 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T22:38:46,172 INFO [M:0;e611192d6313:34355 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T22:38:46,172 INFO [M:0;e611192d6313:34355 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T22:38:46,173 DEBUG [M:0;e611192d6313:34355 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T22:38:46,173 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): 
Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-15T22:38:46,173 DEBUG [master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710296276 {}] cleaner.HFileCleaner(306): Exit Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710296276,5,FailOnTimeoutGroup] 2024-11-15T22:38:46,173 DEBUG [M:0;e611192d6313:34355 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T22:38:46,173 DEBUG [master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710296276 {}] cleaner.HFileCleaner(306): Exit Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710296276,5,FailOnTimeoutGroup] 2024-11-15T22:38:46,174 INFO [M:0;e611192d6313:34355 {}] hbase.ChoreService(370): Chore service for: master/e611192d6313:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T22:38:46,174 INFO [M:0;e611192d6313:34355 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T22:38:46,174 DEBUG [M:0;e611192d6313:34355 {}] master.HMaster(1795): Stopping service threads 2024-11-15T22:38:46,175 INFO [M:0;e611192d6313:34355 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T22:38:46,175 INFO [M:0;e611192d6313:34355 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T22:38:46,175 INFO [M:0;e611192d6313:34355 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T22:38:46,176 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T22:38:46,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T22:38:46,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:46,181 DEBUG [M:0;e611192d6313:34355 {}] zookeeper.ZKUtil(347): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T22:38:46,181 WARN [M:0;e611192d6313:34355 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T22:38:46,182 INFO [M:0;e611192d6313:34355 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/.lastflushedseqids 2024-11-15T22:38:46,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741846_1030 (size=111) 2024-11-15T22:38:46,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741846_1030 (size=111) 2024-11-15T22:38:46,188 INFO [M:0;e611192d6313:34355 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T22:38:46,189 INFO [M:0;e611192d6313:34355 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T22:38:46,189 DEBUG 
[M:0;e611192d6313:34355 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T22:38:46,189 INFO [M:0;e611192d6313:34355 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:38:46,189 DEBUG [M:0;e611192d6313:34355 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:38:46,189 DEBUG [M:0;e611192d6313:34355 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T22:38:46,189 DEBUG [M:0;e611192d6313:34355 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:38:46,189 INFO [M:0;e611192d6313:34355 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-15T22:38:46,189 ERROR [FSHLog-0-hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData-prefix:e611192d6313,34355,1731710295755 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33199,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:46,190 WARN [FSHLog-0-hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData-prefix:e611192d6313,34355,1731710295755 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33199,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:38:46,190 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog e611192d6313%2C34355%2C1731710295755:(num 1731710296062) roll requested 2024-11-15T22:38:46,190 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C34355%2C1731710295755.1731710326190 2024-11-15T22:38:46,197 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:46,197 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:46,197 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:46,197 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:46,197 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:46,197 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/WALs/e611192d6313,34355,1731710295755/e611192d6313%2C34355%2C1731710295755.1731710296062 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/WALs/e611192d6313,34355,1731710295755/e611192d6313%2C34355%2C1731710295755.1731710326190 2024-11-15T22:38:46,198 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33199,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T22:38:46,198 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33199,DS-9e21222b-34c7-4b05-98c9-464b2da7eff3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T22:38:46,198 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/WALs/e611192d6313,34355,1731710295755/e611192d6313%2C34355%2C1731710295755.1731710296062 2024-11-15T22:38:46,199 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39213:39213),(127.0.0.1/127.0.0.1:36353:36353)] 2024-11-15T22:38:46,199 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/WALs/e611192d6313,34355,1731710295755/e611192d6313%2C34355%2C1731710295755.1731710296062 is not closed yet, will try archiving it next time 2024-11-15T22:38:46,199 WARN [IPC Server handler 1 on default port 41597 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/WALs/e611192d6313,34355,1731710295755/e611192d6313%2C34355%2C1731710295755.1731710296062 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-15T22:38:46,199 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/WALs/e611192d6313,34355,1731710295755/e611192d6313%2C34355%2C1731710295755.1731710296062 after 1ms 2024-11-15T22:38:46,218 DEBUG [M:0;e611192d6313:34355 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/355a36c313cb4781a908c7142eccdd79 is 82, key is hbase:meta,,1/info:regioninfo/1731710296977/Put/seqid=0 2024-11-15T22:38:46,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741848_1033 (size=5672) 2024-11-15T22:38:46,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741848_1033 (size=5672) 2024-11-15T22:38:46,223 INFO [M:0;e611192d6313:34355 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/355a36c313cb4781a908c7142eccdd79 2024-11-15T22:38:46,243 DEBUG [M:0;e611192d6313:34355 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4fbe2abacdca4c0bb8a77a2d403c15ab is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731710297449/Put/seqid=0 2024-11-15T22:38:46,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741849_1034 (size=6117) 2024-11-15T22:38:46,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741849_1034 (size=6117) 2024-11-15T22:38:46,248 INFO [M:0;e611192d6313:34355 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4fbe2abacdca4c0bb8a77a2d403c15ab 2024-11-15T22:38:46,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:38:46,261 INFO [RS:0;e611192d6313:39125 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T22:38:46,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39125-0x10140a61a620001, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:38:46,261 INFO [RS:0;e611192d6313:39125 {}] regionserver.HRegionServer(1031): Exiting; stopping=e611192d6313,39125,1731710295927; zookeeper connection closed. 2024-11-15T22:38:46,261 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1862ed {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1862ed 2024-11-15T22:38:46,261 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T22:38:46,265 DEBUG [M:0;e611192d6313:34355 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1d256b7b3224452cba04da3d618848c9 is 69, key is e611192d6313,39125,1731710295927/rs:state/1731710296393/Put/seqid=0 2024-11-15T22:38:46,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741850_1035 (size=5156) 2024-11-15T22:38:46,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741850_1035 (size=5156) 2024-11-15T22:38:46,270 INFO [M:0;e611192d6313:34355 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1d256b7b3224452cba04da3d618848c9 2024-11-15T22:38:46,288 DEBUG [M:0;e611192d6313:34355 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b6364a9c33554772beddfb71f7cb38fc is 52, key is load_balancer_on/state:d/1731710297059/Put/seqid=0 2024-11-15T22:38:46,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741851_1036 (size=5056) 2024-11-15T22:38:46,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741851_1036 (size=5056) 2024-11-15T22:38:46,293 INFO [M:0;e611192d6313:34355 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b6364a9c33554772beddfb71f7cb38fc 2024-11-15T22:38:46,298 DEBUG [M:0;e611192d6313:34355 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/355a36c313cb4781a908c7142eccdd79 as hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/355a36c313cb4781a908c7142eccdd79 2024-11-15T22:38:46,303 INFO [M:0;e611192d6313:34355 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/355a36c313cb4781a908c7142eccdd79, entries=8, sequenceid=56, filesize=5.5 K 2024-11-15T22:38:46,304 DEBUG [M:0;e611192d6313:34355 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4fbe2abacdca4c0bb8a77a2d403c15ab as hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4fbe2abacdca4c0bb8a77a2d403c15ab 2024-11-15T22:38:46,308 INFO [M:0;e611192d6313:34355 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4fbe2abacdca4c0bb8a77a2d403c15ab, entries=6, sequenceid=56, filesize=6.0 K 2024-11-15T22:38:46,309 DEBUG [M:0;e611192d6313:34355 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1d256b7b3224452cba04da3d618848c9 as hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1d256b7b3224452cba04da3d618848c9 2024-11-15T22:38:46,314 INFO [M:0;e611192d6313:34355 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1d256b7b3224452cba04da3d618848c9, entries=1, sequenceid=56, filesize=5.0 K 2024-11-15T22:38:46,315 DEBUG [M:0;e611192d6313:34355 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b6364a9c33554772beddfb71f7cb38fc as hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b6364a9c33554772beddfb71f7cb38fc 2024-11-15T22:38:46,320 INFO [M:0;e611192d6313:34355 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b6364a9c33554772beddfb71f7cb38fc, entries=1, sequenceid=56, filesize=4.9 K 2024-11-15T22:38:46,321 INFO [M:0;e611192d6313:34355 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=56, compaction requested=false 2024-11-15T22:38:46,323 INFO [M:0;e611192d6313:34355 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T22:38:46,323 DEBUG [M:0;e611192d6313:34355 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731710326189Disabling compacts and flushes for region at 1731710326189Disabling writes for close at 1731710326189Obtaining lock to block concurrent updates at 1731710326189Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731710326189Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1731710326189Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731710326199 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731710326200 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731710326217 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731710326217Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731710326228 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731710326242 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731710326242Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731710326252 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731710326265 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731710326265Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731710326275 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731710326287 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731710326287Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52f0a14e: reopening flushed file at 1731710326297 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2add59d7: reopening flushed file at 1731710326303 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5277282e: reopening flushed file at 1731710326308 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26aa37a2: reopening flushed file at 1731710326314 (+6 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=56, compaction requested=false at 1731710326321 (+7 ms)Writing region close event to WAL at 1731710326323 (+2 ms)Closed at 1731710326323 2024-11-15T22:38:46,323 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:46,323 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:46,323 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:46,323 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:46,323 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:38:46,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741847_1031 (size=757) 2024-11-15T22:38:46,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32783 is added to blk_1073741847_1031 (size=757) 2024-11-15T22:38:47,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:47,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:47,279 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,304 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,307 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,307 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,307 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,309 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,522 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@25421c9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32783, datanodeUuid=6f378189-230f-48c9-8213-c8e5fdaa10d0, infoPort=39213, infoSecurePort=0, ipcPort=38867, storageInfo=lv=-57;cid=testClusterID;nsid=1105603990;c=1731710293246):Failed to transfer BP-1285903154-172.17.0.3-1731710293246:blk_1073741830_1032 to 127.0.0.1:44217 got java.net.SocketException: Original Exception : java.io.IOException: Connection reset by peer at sun.nio.ch.FileChannelImpl.transferTo0(Native Method) ~[?:?] at sun.nio.ch.FileChannelImpl.transferToDirectlyInternal(FileChannelImpl.java:508) ~[?:?] at sun.nio.ch.FileChannelImpl.transferToDirectly(FileChannelImpl.java:573) ~[?:?] at sun.nio.ch.FileChannelImpl.transferTo(FileChannelImpl.java:695) ~[?:?] at org.apache.hadoop.net.SocketOutputStream.transferToFully(SocketOutputStream.java:222) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.FileIoProvider.transferToSocketFully(FileIoProvider.java:278) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockSender.sendPacket(BlockSender.java:619) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockSender.doSendBlock(BlockSender.java:819) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockSender.sendBlock(BlockSender.java:766) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3102) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Connection reset by peer ... 13 more 2024-11-15T22:38:47,651 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-15T22:38:47,815 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T22:38:47,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,846 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,846 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,846 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:47,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:48,104 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:48,104 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:48,402 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T22:38:48,402 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T22:38:48,403 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T22:38:48,403 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-15T22:38:49,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:49,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:50,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:50,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:50,201 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/WALs/e611192d6313,34355,1731710295755/e611192d6313%2C34355%2C1731710295755.1731710296062 after 4003ms 2024-11-15T22:38:50,202 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/WALs/e611192d6313,34355,1731710295755/e611192d6313%2C34355%2C1731710295755.1731710296062 to hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/oldWALs/e611192d6313%2C34355%2C1731710295755.1731710296062 2024-11-15T22:38:50,210 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/MasterData/oldWALs/e611192d6313%2C34355%2C1731710295755.1731710296062 to hdfs://localhost:41597/user/jenkins/test-data/3483bf75-a45a-c428-c63f-239c731416d2/oldWALs/e611192d6313%2C34355%2C1731710295755.1731710296062$masterlocalwal$ 2024-11-15T22:38:50,210 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T22:38:50,210 INFO [M:0;e611192d6313:34355 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-15T22:38:50,210 INFO [M:0;e611192d6313:34355 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34355 2024-11-15T22:38:50,210 INFO [M:0;e611192d6313:34355 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T22:38:50,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:38:50,377 INFO [M:0;e611192d6313:34355 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T22:38:50,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34355-0x10140a61a620000, quorum=127.0.0.1:55589, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:38:50,412 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ca27a78{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:38:50,413 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@54131bab{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:38:50,413 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:38:50,413 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3de0aa3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:38:50,413 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72d2e6e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.log.dir/,STOPPED} 2024-11-15T22:38:50,416 WARN [BP-1285903154-172.17.0.3-1731710293246 heartbeating to localhost/127.0.0.1:41597 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:38:50,416 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T22:38:50,416 WARN [BP-1285903154-172.17.0.3-1731710293246 heartbeating to localhost/127.0.0.1:41597 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1285903154-172.17.0.3-1731710293246 (Datanode Uuid 1e8bc306-eb2e-4f91-934b-c931e98c67bd) service to localhost/127.0.0.1:41597 2024-11-15T22:38:50,416 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:38:50,418 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/data/data3/current/BP-1285903154-172.17.0.3-1731710293246 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:38:50,418 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/data/data4/current/BP-1285903154-172.17.0.3-1731710293246 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:38:50,418 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:38:50,421 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f03ce36{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:38:50,422 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@cbccde7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:38:50,422 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:38:50,422 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3df3f65e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:38:50,423 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bc7279c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.log.dir/,STOPPED} 2024-11-15T22:38:50,424 WARN [BP-1285903154-172.17.0.3-1731710293246 heartbeating to localhost/127.0.0.1:41597 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:38:50,424 WARN [BP-1285903154-172.17.0.3-1731710293246 heartbeating to localhost/127.0.0.1:41597 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1285903154-172.17.0.3-1731710293246 (Datanode Uuid 6f378189-230f-48c9-8213-c8e5fdaa10d0) service to localhost/127.0.0.1:41597 2024-11-15T22:38:50,424 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T22:38:50,424 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:38:50,425 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/data/data1/current/BP-1285903154-172.17.0.3-1731710293246 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:38:50,425 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/cluster_f815bb99-b78d-6333-7819-b5ff198e3465/data/data2/current/BP-1285903154-172.17.0.3-1731710293246 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:38:50,425 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:38:50,429 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62bbed65{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T22:38:50,430 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2940e1de{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:38:50,430 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:38:50,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42aa99e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:38:50,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7091f2a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.log.dir/,STOPPED} 2024-11-15T22:38:50,436 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T22:38:50,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T22:38:50,462 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=182 (was 157) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41597 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:41597 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41597 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41597 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41597 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41597 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41597 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41597 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 452) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=117 (was 112) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3996 (was 4164) 2024-11-15T22:38:50,469 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=182, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=117, ProcessCount=11, AvailableMemoryMB=3996 2024-11-15T22:38:50,469 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T22:38:50,469 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.log.dir so I do NOT create it in target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884 2024-11-15T22:38:50,469 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fdbd04be-233d-d700-472a-bd79bc724e34/hadoop.tmp.dir so I do NOT create it in target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884 2024-11-15T22:38:50,469 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/cluster_199dd95a-bbc3-d47f-4dc5-ea929677a5e4, deleteOnExit=true 2024-11-15T22:38:50,469 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T22:38:50,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/test.cache.data in system properties and HBase conf 2024-11-15T22:38:50,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T22:38:50,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/hadoop.log.dir in system properties and HBase conf 2024-11-15T22:38:50,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T22:38:50,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T22:38:50,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T22:38:50,470 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-15T22:38:50,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T22:38:50,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T22:38:50,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T22:38:50,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T22:38:50,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T22:38:50,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T22:38:50,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T22:38:50,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T22:38:50,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T22:38:50,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/nfs.dump.dir in system properties and HBase conf 2024-11-15T22:38:50,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/java.io.tmpdir in system properties and HBase conf 2024-11-15T22:38:50,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T22:38:50,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T22:38:50,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T22:38:50,484 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T22:38:50,834 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:38:50,838 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:38:50,839 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:38:50,839 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:38:50,839 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T22:38:50,840 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:38:50,840 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c41fb6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:38:50,840 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b8edabe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:38:50,933 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4d36967f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/java.io.tmpdir/jetty-localhost-36037-hadoop-hdfs-3_4_1-tests_jar-_-any-2149384728014396474/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T22:38:50,933 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4d389c80{HTTP/1.1, (http/1.1)}{localhost:36037} 2024-11-15T22:38:50,933 INFO [Time-limited test {}] server.Server(415): Started @189091ms 2024-11-15T22:38:50,943 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T22:38:51,107 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:51,107 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:51,278 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:38:51,281 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:38:51,282 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:38:51,282 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:38:51,282 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T22:38:51,282 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10c2896a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:38:51,282 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bd1d692{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:38:51,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2526c219{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/java.io.tmpdir/jetty-localhost-43457-hadoop-hdfs-3_4_1-tests_jar-_-any-4970249930028361975/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:38:51,375 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@74e6f5d9{HTTP/1.1, (http/1.1)}{localhost:43457} 2024-11-15T22:38:51,375 INFO [Time-limited test {}] server.Server(415): Started @189532ms 2024-11-15T22:38:51,376 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:38:51,399 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:38:51,402 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:38:51,402 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:38:51,402 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:38:51,403 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T22:38:51,403 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5894e22d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:38:51,403 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41bc1801{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:38:51,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5cdf1f1c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/java.io.tmpdir/jetty-localhost-39449-hadoop-hdfs-3_4_1-tests_jar-_-any-2184242874523708531/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:38:51,497 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51e1410e{HTTP/1.1, (http/1.1)}{localhost:39449} 2024-11-15T22:38:51,497 INFO [Time-limited test {}] server.Server(415): Started @189655ms 2024-11-15T22:38:51,498 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:38:52,109 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:52,109 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:38:52,553 WARN [Thread-1647 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/cluster_199dd95a-bbc3-d47f-4dc5-ea929677a5e4/data/data1/current/BP-2142243783-172.17.0.3-1731710330493/current, will proceed with Du for space computation calculation, 2024-11-15T22:38:52,553 WARN [Thread-1648 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/cluster_199dd95a-bbc3-d47f-4dc5-ea929677a5e4/data/data2/current/BP-2142243783-172.17.0.3-1731710330493/current, will proceed with Du for space computation calculation, 2024-11-15T22:38:52,577 WARN [Thread-1611 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T22:38:52,579 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94c5b66b17cc761a with lease ID 0x92f52151389aab4a: Processing first storage report for DS-25c47799-e445-41c7-8f1d-638900f101f6 from datanode DatanodeRegistration(127.0.0.1:34321, datanodeUuid=624e525e-2171-49e7-97e6-941e7a16f48a, infoPort=46671, infoSecurePort=0, ipcPort=45935, storageInfo=lv=-57;cid=testClusterID;nsid=1950595808;c=1731710330493) 2024-11-15T22:38:52,579 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94c5b66b17cc761a with lease ID 0x92f52151389aab4a: from storage DS-25c47799-e445-41c7-8f1d-638900f101f6 node DatanodeRegistration(127.0.0.1:34321, datanodeUuid=624e525e-2171-49e7-97e6-941e7a16f48a, infoPort=46671, infoSecurePort=0, ipcPort=45935, storageInfo=lv=-57;cid=testClusterID;nsid=1950595808;c=1731710330493), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:38:52,579 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94c5b66b17cc761a with lease ID 0x92f52151389aab4a: Processing first storage report for DS-b958315b-0035-428c-8030-7a0b25a7a162 from datanode DatanodeRegistration(127.0.0.1:34321, datanodeUuid=624e525e-2171-49e7-97e6-941e7a16f48a, infoPort=46671, infoSecurePort=0, ipcPort=45935, storageInfo=lv=-57;cid=testClusterID;nsid=1950595808;c=1731710330493) 2024-11-15T22:38:52,579 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94c5b66b17cc761a with lease ID 0x92f52151389aab4a: from storage DS-b958315b-0035-428c-8030-7a0b25a7a162 node DatanodeRegistration(127.0.0.1:34321, datanodeUuid=624e525e-2171-49e7-97e6-941e7a16f48a, infoPort=46671, infoSecurePort=0, ipcPort=45935, storageInfo=lv=-57;cid=testClusterID;nsid=1950595808;c=1731710330493), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:38:52,702 WARN [Thread-1658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/cluster_199dd95a-bbc3-d47f-4dc5-ea929677a5e4/data/data3/current/BP-2142243783-172.17.0.3-1731710330493/current, will proceed with Du for space computation calculation, 2024-11-15T22:38:52,702 WARN [Thread-1659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/cluster_199dd95a-bbc3-d47f-4dc5-ea929677a5e4/data/data4/current/BP-2142243783-172.17.0.3-1731710330493/current, will proceed with Du for space computation calculation, 2024-11-15T22:38:52,721 WARN [Thread-1634 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T22:38:52,724 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcea59e0e015b4e8a with lease ID 0x92f52151389aab4b: Processing first storage report for DS-9dad6ef5-96d7-479e-a468-699b8e3dfc24 from datanode DatanodeRegistration(127.0.0.1:42195, datanodeUuid=1afffb5e-4f1a-45aa-8f7a-a4f980faf30d, infoPort=37951, infoSecurePort=0, ipcPort=34675, storageInfo=lv=-57;cid=testClusterID;nsid=1950595808;c=1731710330493) 2024-11-15T22:38:52,724 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcea59e0e015b4e8a with lease ID 0x92f52151389aab4b: from storage DS-9dad6ef5-96d7-479e-a468-699b8e3dfc24 node DatanodeRegistration(127.0.0.1:42195, datanodeUuid=1afffb5e-4f1a-45aa-8f7a-a4f980faf30d, infoPort=37951, infoSecurePort=0, ipcPort=34675, storageInfo=lv=-57;cid=testClusterID;nsid=1950595808;c=1731710330493), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:38:52,724 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcea59e0e015b4e8a with lease ID 0x92f52151389aab4b: Processing first storage report for DS-346f5056-fb35-42bf-b92f-7603a434eecb from datanode DatanodeRegistration(127.0.0.1:42195, datanodeUuid=1afffb5e-4f1a-45aa-8f7a-a4f980faf30d, infoPort=37951, infoSecurePort=0, ipcPort=34675, storageInfo=lv=-57;cid=testClusterID;nsid=1950595808;c=1731710330493) 2024-11-15T22:38:52,724 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcea59e0e015b4e8a with lease ID 0x92f52151389aab4b: from storage DS-346f5056-fb35-42bf-b92f-7603a434eecb node DatanodeRegistration(127.0.0.1:42195, datanodeUuid=1afffb5e-4f1a-45aa-8f7a-a4f980faf30d, infoPort=37951, infoSecurePort=0, ipcPort=34675, storageInfo=lv=-57;cid=testClusterID;nsid=1950595808;c=1731710330493), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:38:52,731 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884 2024-11-15T22:38:52,734 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/cluster_199dd95a-bbc3-d47f-4dc5-ea929677a5e4/zookeeper_0, clientPort=59371, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/cluster_199dd95a-bbc3-d47f-4dc5-ea929677a5e4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/cluster_199dd95a-bbc3-d47f-4dc5-ea929677a5e4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T22:38:52,735 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59371 2024-11-15T22:38:52,735 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:38:52,736 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:38:52,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741825_1001 (size=7) 2024-11-15T22:38:52,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741825_1001 (size=7) 2024-11-15T22:38:52,746 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d with version=8 2024-11-15T22:38:52,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/hbase-staging 2024-11-15T22:38:52,748 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e611192d6313:0 server-side Connection retries=45 2024-11-15T22:38:52,748 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:38:52,748 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T22:38:52,748 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T22:38:52,748 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:38:52,748 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T22:38:52,749 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T22:38:52,749 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T22:38:52,749 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37993 2024-11-15T22:38:52,751 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37993 connecting to ZooKeeper ensemble=127.0.0.1:59371 2024-11-15T22:38:52,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:379930x0, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-15T22:38:52,795 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37993-0x10140a6aae80000 connected 2024-11-15T22:38:52,883 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:38:52,885 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:38:52,889 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:38:52,889 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d, hbase.cluster.distributed=false 2024-11-15T22:38:52,891 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T22:38:52,891 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37993 2024-11-15T22:38:52,892 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37993 2024-11-15T22:38:52,892 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37993 2024-11-15T22:38:52,892 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37993 2024-11-15T22:38:52,892 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37993 2024-11-15T22:38:52,908 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e611192d6313:0 server-side Connection retries=45 2024-11-15T22:38:52,908 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:38:52,908 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T22:38:52,908 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T22:38:52,908 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:38:52,908 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T22:38:52,908 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T22:38:52,908 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T22:38:52,909 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41001 2024-11-15T22:38:52,911 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41001 connecting to ZooKeeper ensemble=127.0.0.1:59371 2024-11-15T22:38:52,911 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:38:52,913 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:38:52,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:410010x0, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T22:38:52,925 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:410010x0, quorum=127.0.0.1:59371, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:38:52,925 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41001-0x10140a6aae80001 connected 2024-11-15T22:38:52,925 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T22:38:52,926 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T22:38:52,927 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T22:38:52,928 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T22:38:52,929 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41001 2024-11-15T22:38:52,929 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41001 2024-11-15T22:38:52,929 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41001 2024-11-15T22:38:52,930 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41001 2024-11-15T22:38:52,930 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41001 2024-11-15T22:38:52,943 DEBUG [M:0;e611192d6313:37993 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e611192d6313:37993 2024-11-15T22:38:52,943 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e611192d6313,37993,1731710332748 2024-11-15T22:38:52,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-11-15T22:38:52,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:38:52,957 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e611192d6313,37993,1731710332748 2024-11-15T22:38:52,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:52,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T22:38:52,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:52,967 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T22:38:52,968 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e611192d6313,37993,1731710332748 from backup master directory 2024-11-15T22:38:52,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e611192d6313,37993,1731710332748 2024-11-15T22:38:52,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:38:52,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:38:52,977 WARN [master/e611192d6313:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-15T22:38:52,977 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e611192d6313,37993,1731710332748 2024-11-15T22:38:52,985 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/hbase.id] with ID: 76913f1e-a792-43e7-989d-a15f4e4fcc50 2024-11-15T22:38:52,985 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/.tmp/hbase.id 2024-11-15T22:38:52,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741826_1002 (size=42) 2024-11-15T22:38:52,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741826_1002 (size=42) 2024-11-15T22:38:52,992 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/.tmp/hbase.id]:[hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/hbase.id] 2024-11-15T22:38:53,002 INFO [master/e611192d6313:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:38:53,002 INFO [master/e611192d6313:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T22:38:53,003 INFO [master/e611192d6313:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-15T22:38:53,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:53,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:53,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741827_1003 (size=196) 2024-11-15T22:38:53,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741827_1003 (size=196) 2024-11-15T22:38:53,022 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T22:38:53,023 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T22:38:53,023 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:38:53,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741828_1004 (size=1189) 2024-11-15T22:38:53,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741828_1004 (size=1189) 2024-11-15T22:38:53,030 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store 2024-11-15T22:38:53,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741829_1005 (size=34) 2024-11-15T22:38:53,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741829_1005 (size=34) 2024-11-15T22:38:53,037 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:38:53,037 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T22:38:53,037 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:38:53,037 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:38:53,037 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T22:38:53,037 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:38:53,037 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T22:38:53,037 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731710333037Disabling compacts and flushes for region at 1731710333037Disabling writes for close at 1731710333037Writing region close event to WAL at 1731710333037Closed at 1731710333037 2024-11-15T22:38:53,038 WARN [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/.initializing 2024-11-15T22:38:53,038 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/WALs/e611192d6313,37993,1731710332748 2024-11-15T22:38:53,041 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C37993%2C1731710332748, suffix=, logDir=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/WALs/e611192d6313,37993,1731710332748, archiveDir=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/oldWALs, maxLogs=10 2024-11-15T22:38:53,041 INFO [master/e611192d6313:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C37993%2C1731710332748.1731710333041 2024-11-15T22:38:53,046 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/WALs/e611192d6313,37993,1731710332748/e611192d6313%2C37993%2C1731710332748.1731710333041 2024-11-15T22:38:53,048 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46671:46671),(127.0.0.1/127.0.0.1:37951:37951)] 2024-11-15T22:38:53,049 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:38:53,049 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:38:53,049 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:53,049 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:53,050 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:53,051 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T22:38:53,051 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:53,052 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:53,052 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:53,053 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T22:38:53,053 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:53,053 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:38:53,053 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:53,054 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T22:38:53,054 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:53,055 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:38:53,055 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:53,055 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T22:38:53,055 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:53,056 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:38:53,056 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:53,056 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:53,057 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:53,058 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:53,058 DEBUG [master/e611192d6313:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:53,058 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T22:38:53,059 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:38:53,061 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:38:53,062 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=841074, jitterRate=0.06948211789131165}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T22:38:53,062 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731710333049Initializing all the Stores at 1731710333050 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710333050Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710333050Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710333050Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710333050Cleaning up temporary data from old regions at 1731710333058 (+8 ms)Region opened successfully at 1731710333062 (+4 ms) 2024-11-15T22:38:53,063 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T22:38:53,066 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46a3c140, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e611192d6313/172.17.0.3:0 2024-11-15T22:38:53,067 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T22:38:53,067 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T22:38:53,067 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T22:38:53,067 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T22:38:53,068 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T22:38:53,068 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T22:38:53,068 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T22:38:53,070 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T22:38:53,071 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T22:38:53,082 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T22:38:53,083 INFO [master/e611192d6313:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T22:38:53,084 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T22:38:53,097 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T22:38:53,097 INFO [master/e611192d6313:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T22:38:53,099 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T22:38:53,107 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T22:38:53,109 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T22:38:53,109 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:53,109 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:53,118 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T22:38:53,123 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T22:38:53,135 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T22:38:53,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T22:38:53,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T22:38:53,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:53,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:53,146 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e611192d6313,37993,1731710332748, sessionid=0x10140a6aae80000, setting cluster-up flag (Was=false) 2024-11-15T22:38:53,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:53,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:53,198 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, 
/hbase/flush-table-proc/abort 2024-11-15T22:38:53,200 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e611192d6313,37993,1731710332748 2024-11-15T22:38:53,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:53,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:53,255 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T22:38:53,259 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e611192d6313,37993,1731710332748 2024-11-15T22:38:53,263 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T22:38:53,265 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T22:38:53,266 INFO [master/e611192d6313:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T22:38:53,266 INFO [master/e611192d6313:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-15T22:38:53,266 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e611192d6313,37993,1731710332748 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T22:38:53,269 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:38:53,269 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:38:53,269 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:38:53,269 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:38:53,269 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e611192d6313:0, corePoolSize=10, maxPoolSize=10 2024-11-15T22:38:53,269 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:53,270 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e611192d6313:0, corePoolSize=2, maxPoolSize=2 2024-11-15T22:38:53,270 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:53,271 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731710363271 2024-11-15T22:38:53,271 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T22:38:53,271 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T22:38:53,271 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T22:38:53,271 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T22:38:53,271 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T22:38:53,271 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T22:38:53,272 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,272 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:38:53,272 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T22:38:53,272 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T22:38:53,272 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T22:38:53,272 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T22:38:53,273 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T22:38:53,273 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T22:38:53,273 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710333273,5,FailOnTimeoutGroup] 2024-11-15T22:38:53,273 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710333273,5,FailOnTimeoutGroup] 2024-11-15T22:38:53,273 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,274 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T22:38:53,274 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,274 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-15T22:38:53,274 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:53,274 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T22:38:53,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741831_1007 (size=1321) 2024-11-15T22:38:53,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741831_1007 (size=1321) 2024-11-15T22:38:53,281 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T22:38:53,281 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d 2024-11-15T22:38:53,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741832_1008 (size=32) 2024-11-15T22:38:53,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741832_1008 (size=32) 2024-11-15T22:38:53,288 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:38:53,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T22:38:53,290 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T22:38:53,290 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:53,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:53,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T22:38:53,292 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T22:38:53,292 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:53,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:53,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T22:38:53,294 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T22:38:53,294 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:53,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:53,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T22:38:53,295 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T22:38:53,295 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:53,296 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:53,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T22:38:53,297 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740 2024-11-15T22:38:53,297 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740 2024-11-15T22:38:53,298 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T22:38:53,298 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T22:38:53,299 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T22:38:53,300 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T22:38:53,302 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:38:53,302 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721510, jitterRate=-0.08255341649055481}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T22:38:53,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731710333288Initializing all the Stores at 1731710333289 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710333289Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710333289Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710333289Instantiating store for column family {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710333289Cleaning up temporary data from old regions at 1731710333298 (+9 ms)Region opened successfully at 1731710333303 (+5 ms) 2024-11-15T22:38:53,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T22:38:53,303 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T22:38:53,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T22:38:53,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T22:38:53,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T22:38:53,303 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T22:38:53,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731710333303Disabling compacts and flushes for region at 1731710333303Disabling writes for close at 1731710333303Writing region close event to WAL at 1731710333303Closed at 1731710333303 2024-11-15T22:38:53,305 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:38:53,305 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T22:38:53,305 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T22:38:53,306 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T22:38:53,307 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T22:38:53,332 INFO [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer(746): ClusterId : 76913f1e-a792-43e7-989d-a15f4e4fcc50 2024-11-15T22:38:53,332 DEBUG [RS:0;e611192d6313:41001 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T22:38:53,340 DEBUG [RS:0;e611192d6313:41001 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T22:38:53,340 DEBUG [RS:0;e611192d6313:41001 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T22:38:53,351 DEBUG [RS:0;e611192d6313:41001 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T22:38:53,351 DEBUG [RS:0;e611192d6313:41001 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73338645, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e611192d6313/172.17.0.3:0 2024-11-15T22:38:53,366 DEBUG [RS:0;e611192d6313:41001 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e611192d6313:41001 2024-11-15T22:38:53,366 INFO [RS:0;e611192d6313:41001 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T22:38:53,366 INFO [RS:0;e611192d6313:41001 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T22:38:53,366 DEBUG [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-15T22:38:53,366 INFO [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer(2659): reportForDuty to master=e611192d6313,37993,1731710332748 with port=41001, startcode=1731710332908 2024-11-15T22:38:53,367 DEBUG [RS:0;e611192d6313:41001 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T22:38:53,368 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40775, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T22:38:53,369 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37993 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e611192d6313,41001,1731710332908 2024-11-15T22:38:53,369 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37993 {}] master.ServerManager(517): Registering regionserver=e611192d6313,41001,1731710332908 2024-11-15T22:38:53,370 DEBUG [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d 2024-11-15T22:38:53,370 DEBUG [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37049 2024-11-15T22:38:53,370 DEBUG [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T22:38:53,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T22:38:53,381 DEBUG [RS:0;e611192d6313:41001 {}] zookeeper.ZKUtil(111): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e611192d6313,41001,1731710332908 2024-11-15T22:38:53,381 WARN [RS:0;e611192d6313:41001 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
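The hbase:meta descriptor logged above (families info/ns/rep_barrier/table with ROW_INDEX_V1 encoding, ROWCOL bloom filters, IN_MEMORY, 8 KB blocks, plus the MultiRowMutationEndpoint coprocessor) is built internally by FSTableDescriptors, but a similar descriptor can be approximated with the public client API. A minimal sketch, assuming the current TableDescriptorBuilder/ColumnFamilyDescriptorBuilder APIs; the table name "demo" and the wrapper class are placeholders, and only the 'info' family is shown (the other families in the log differ mainly in VERSIONS and BLOCKSIZE):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static TableDescriptor build() throws IOException {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))   // placeholder name
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                     // VERSIONS => '3'
                .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
                .setInMemory(true)                                     // IN_MEMORY => 'true'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
                .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192'
                .build())
            // coprocessor$1 in the logged descriptor
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }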
2024-11-15T22:38:53,382 INFO [RS:0;e611192d6313:41001 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:38:53,382 DEBUG [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/WALs/e611192d6313,41001,1731710332908 2024-11-15T22:38:53,382 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e611192d6313,41001,1731710332908] 2024-11-15T22:38:53,385 INFO [RS:0;e611192d6313:41001 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T22:38:53,387 INFO [RS:0;e611192d6313:41001 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T22:38:53,388 INFO [RS:0;e611192d6313:41001 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T22:38:53,388 INFO [RS:0;e611192d6313:41001 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,388 INFO [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T22:38:53,389 INFO [RS:0;e611192d6313:41001 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T22:38:53,389 INFO [RS:0;e611192d6313:41001 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,389 DEBUG [RS:0;e611192d6313:41001 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:53,389 DEBUG [RS:0;e611192d6313:41001 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:53,389 DEBUG [RS:0;e611192d6313:41001 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:53,389 DEBUG [RS:0;e611192d6313:41001 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:53,389 DEBUG [RS:0;e611192d6313:41001 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:53,389 DEBUG [RS:0;e611192d6313:41001 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e611192d6313:0, corePoolSize=2, maxPoolSize=2 2024-11-15T22:38:53,389 DEBUG [RS:0;e611192d6313:41001 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:53,389 DEBUG [RS:0;e611192d6313:41001 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:53,390 DEBUG [RS:0;e611192d6313:41001 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e611192d6313:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T22:38:53,390 DEBUG [RS:0;e611192d6313:41001 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:53,390 DEBUG [RS:0;e611192d6313:41001 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:53,390 DEBUG [RS:0;e611192d6313:41001 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:38:53,390 DEBUG [RS:0;e611192d6313:41001 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e611192d6313:0, corePoolSize=3, maxPoolSize=3 2024-11-15T22:38:53,390 DEBUG [RS:0;e611192d6313:41001 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0, corePoolSize=3, maxPoolSize=3 2024-11-15T22:38:53,390 INFO [RS:0;e611192d6313:41001 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,390 INFO [RS:0;e611192d6313:41001 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,391 INFO [RS:0;e611192d6313:41001 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,391 INFO [RS:0;e611192d6313:41001 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,391 INFO [RS:0;e611192d6313:41001 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,391 INFO [RS:0;e611192d6313:41001 {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,41001,1731710332908-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T22:38:53,409 INFO [RS:0;e611192d6313:41001 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T22:38:53,409 INFO [RS:0;e611192d6313:41001 {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,41001,1731710332908-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,409 INFO [RS:0;e611192d6313:41001 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,409 INFO [RS:0;e611192d6313:41001 {}] regionserver.Replication(171): e611192d6313,41001,1731710332908 started 2024-11-15T22:38:53,420 INFO [RS:0;e611192d6313:41001 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
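Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." line above is emitted when a ScheduledChore is handed to the region server's ChoreService. A minimal sketch of that pattern, assuming HBase's internal ScheduledChore/ChoreService classes (they are private-audience API, and the chore name here is made up):

    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    // Hypothetical chore illustrating the ScheduledChore pattern used above.
    class HeartbeatChore extends ScheduledChore {
      HeartbeatChore(Stoppable stopper) {
        super("HeartbeatChore", stopper, 1000);   // name, stopper, period in milliseconds
      }

      @Override
      protected void chore() {
        // periodic work; the ChoreService invokes this once per period
      }
    }

    // Usage, given some Stoppable owner:
    //   ChoreService choreService = new ChoreService("demo");
    //   choreService.scheduleChore(new HeartbeatChore(owner));   // logs "... is enabled."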
2024-11-15T22:38:53,421 INFO [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer(1482): Serving as e611192d6313,41001,1731710332908, RpcServer on e611192d6313/172.17.0.3:41001, sessionid=0x10140a6aae80001 2024-11-15T22:38:53,421 DEBUG [RS:0;e611192d6313:41001 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T22:38:53,421 DEBUG [RS:0;e611192d6313:41001 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e611192d6313,41001,1731710332908 2024-11-15T22:38:53,421 DEBUG [RS:0;e611192d6313:41001 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e611192d6313,41001,1731710332908' 2024-11-15T22:38:53,421 DEBUG [RS:0;e611192d6313:41001 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T22:38:53,421 DEBUG [RS:0;e611192d6313:41001 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T22:38:53,422 DEBUG [RS:0;e611192d6313:41001 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T22:38:53,422 DEBUG [RS:0;e611192d6313:41001 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T22:38:53,422 DEBUG [RS:0;e611192d6313:41001 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e611192d6313,41001,1731710332908 2024-11-15T22:38:53,422 DEBUG [RS:0;e611192d6313:41001 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e611192d6313,41001,1731710332908' 2024-11-15T22:38:53,422 DEBUG [RS:0;e611192d6313:41001 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T22:38:53,422 DEBUG [RS:0;e611192d6313:41001 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T22:38:53,423 DEBUG [RS:0;e611192d6313:41001 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T22:38:53,423 INFO [RS:0;e611192d6313:41001 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T22:38:53,423 INFO [RS:0;e611192d6313:41001 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T22:38:53,457 WARN [e611192d6313:37993 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-15T22:38:53,527 INFO [RS:0;e611192d6313:41001 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C41001%2C1731710332908, suffix=, logDir=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/WALs/e611192d6313,41001,1731710332908, archiveDir=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/oldWALs, maxLogs=32 2024-11-15T22:38:53,528 INFO [RS:0;e611192d6313:41001 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C41001%2C1731710332908.1731710333528 2024-11-15T22:38:53,536 INFO [RS:0;e611192d6313:41001 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/WALs/e611192d6313,41001,1731710332908/e611192d6313%2C41001%2C1731710332908.1731710333528 2024-11-15T22:38:53,540 DEBUG [RS:0;e611192d6313:41001 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37951:37951),(127.0.0.1/127.0.0.1:46671:46671)] 2024-11-15T22:38:53,707 DEBUG [e611192d6313:37993 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T22:38:53,708 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e611192d6313,41001,1731710332908 2024-11-15T22:38:53,709 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e611192d6313,41001,1731710332908, state=OPENING 2024-11-15T22:38:53,718 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T22:38:53,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:53,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:38:53,729 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:38:53,729 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T22:38:53,729 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:38:53,729 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e611192d6313,41001,1731710332908}] 2024-11-15T22:38:53,882 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T22:38:53,884 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48439, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T22:38:53,890 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T22:38:53,890 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:38:53,894 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C41001%2C1731710332908.meta, suffix=.meta, logDir=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/WALs/e611192d6313,41001,1731710332908, archiveDir=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/oldWALs, maxLogs=32 2024-11-15T22:38:53,895 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C41001%2C1731710332908.meta.1731710333894.meta 2024-11-15T22:38:53,902 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/WALs/e611192d6313,41001,1731710332908/e611192d6313%2C41001%2C1731710332908.meta.1731710333894.meta 2024-11-15T22:38:53,905 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T22:38:53,908 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37951:37951),(127.0.0.1/127.0.0.1:46671:46671)] 2024-11-15T22:38:53,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:53,908 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:38:53,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:53,909 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T22:38:53,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:53,909 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T22:38:53,909 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
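The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" lines above reflect the region server's WAL settings (rollsize is half the block size). A sketch of how such values could be configured; the property names hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, and hbase.regionserver.maxlogs are assumptions here, not confirmed by this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static Configuration walConf() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property names; values mirror the logged WAL configuration
        // (rollsize = blocksize * logroll multiplier).
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }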
2024-11-15T22:38:53,909 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T22:38:53,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:53,909 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:38:53,909 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T22:38:53,909 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T22:38:53,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:53,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:53,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T22:38:53,911 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T22:38:53,911 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:53,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:53,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T22:38:53,913 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T22:38:53,913 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:53,913 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:53,913 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T22:38:53,914 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T22:38:53,914 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:53,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:53,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T22:38:53,915 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T22:38:53,915 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:53,916 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:38:53,916 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T22:38:53,916 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740 2024-11-15T22:38:53,917 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740 2024-11-15T22:38:53,918 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T22:38:53,918 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T22:38:53,919 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
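The CompactionConfiguration and FlushLargeStoresPolicy lines above enumerate the effective compaction and flush settings for each meta column family. A sketch mapping those logged values back to configuration keys; the hbase.hstore.compaction.* and hbase.hregion.majorcompaction* names are assumed standard property names, while hbase.hregion.percolumnfamilyflush.size.lower.bound is named directly in the log (set here to the 16 MB fallback the log reports, purely for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static Configuration compactionConf() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed standard property names; values mirror the CompactionConfiguration line above.
        conf.setInt("hbase.hstore.compaction.min", 3);                          // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                         // maxFilesToCompact
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);   // minCompactSize
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                   // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);           // off-peak ratio
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);            // major period (7 days)
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);            // major jitter
        // Named directly in the FlushLargeStoresPolicy message above.
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
        return conf;
      }
    }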
2024-11-15T22:38:53,920 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T22:38:53,920 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778927, jitterRate=-0.009543761610984802}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T22:38:53,921 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T22:38:53,921 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731710333909Writing region info on filesystem at 1731710333909Initializing all the Stores at 1731710333910 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710333910Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710333910Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710333910Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710333911 (+1 ms)Cleaning up temporary data from old regions at 1731710333918 (+7 ms)Running coprocessor post-open hooks at 1731710333921 (+3 ms)Region opened successfully at 1731710333921 2024-11-15T22:38:53,922 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731710333882 2024-11-15T22:38:53,924 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T22:38:53,924 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T22:38:53,925 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=e611192d6313,41001,1731710332908 2024-11-15T22:38:53,926 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e611192d6313,41001,1731710332908, state=OPEN 2024-11-15T22:38:53,932 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:53,932 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:53,932 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:53,932 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:53,933 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:53,933 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:53,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:53,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:53,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:53,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:53,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T22:38:53,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T22:38:53,971 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e611192d6313,41001,1731710332908 2024-11-15T22:38:53,971 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:38:53,971 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:38:53,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T22:38:53,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e611192d6313,41001,1731710332908 in 242 msec 2024-11-15T22:38:53,977 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T22:38:53,977 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 670 msec 2024-11-15T22:38:53,978 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:38:53,978 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T22:38:53,979 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T22:38:53,979 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e611192d6313,41001,1731710332908, seqNum=-1] 2024-11-15T22:38:53,980 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T22:38:53,981 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41221, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T22:38:53,986 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 721 msec 2024-11-15T22:38:53,986 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731710333986, completionTime=-1 2024-11-15T22:38:53,986 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is 
running 2024-11-15T22:38:53,986 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T22:38:53,988 INFO [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T22:38:53,988 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731710393988 2024-11-15T22:38:53,988 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731710453988 2024-11-15T22:38:53,988 INFO [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-15T22:38:53,989 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,37993,1731710332748-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,989 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,37993,1731710332748-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,989 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,37993,1731710332748-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,989 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e611192d6313:37993, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,989 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,989 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:53,991 DEBUG [master/e611192d6313:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T22:38:53,993 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.016sec 2024-11-15T22:38:53,993 INFO [master/e611192d6313:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T22:38:53,993 INFO [master/e611192d6313:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T22:38:53,993 INFO [master/e611192d6313:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T22:38:53,993 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-15T22:38:53,993 INFO [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T22:38:53,993 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,37993,1731710332748-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T22:38:53,993 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,37993,1731710332748-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T22:38:53,996 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T22:38:53,996 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T22:38:53,996 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,37993,1731710332748-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:38:54,033 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@394b54be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:38:54,033 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e611192d6313,37993,-1 for getting cluster id 2024-11-15T22:38:54,033 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T22:38:54,036 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '76913f1e-a792-43e7-989d-a15f4e4fcc50' 2024-11-15T22:38:54,037 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T22:38:54,038 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "76913f1e-a792-43e7-989d-a15f4e4fcc50" 2024-11-15T22:38:54,038 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@386d33ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:38:54,038 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e611192d6313,37993,-1] 2024-11-15T22:38:54,038 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T22:38:54,039 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:38:54,040 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48370, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T22:38:54,042 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@319a8be6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:38:54,043 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T22:38:54,044 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e611192d6313,41001,1731710332908, seqNum=-1] 2024-11-15T22:38:54,044 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T22:38:54,045 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43476, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T22:38:54,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e611192d6313,37993,1731710332748 2024-11-15T22:38:54,047 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:38:54,050 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T22:38:54,050 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T22:38:54,051 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is e611192d6313,37993,1731710332748 2024-11-15T22:38:54,051 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@46a5923 2024-11-15T22:38:54,051 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T22:38:54,052 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48382, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T22:38:54,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T22:38:54,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
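The "Minicluster is up" and "set balanceSwitch=false" lines are typical of a test harness bringing up a single-master, single-regionserver cluster and then disabling the balancer before creating its test table. A minimal sketch, assuming the HBaseTestingUtil helper from hbase-testing-util (the method names are the ones that utility commonly exposes; treat them as assumptions):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.client.Admin;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster();                 // one master + one region server by default
        try {
          Admin admin = util.getAdmin();         // shared Admin owned by the utility
          admin.balancerSwitch(false, true);     // mirrors "set balanceSwitch=false" above
          // ... test logic against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }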
2024-11-15T22:38:54,053 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T22:38:54,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T22:38:54,057 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T22:38:54,057 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:54,057 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-15T22:38:54,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T22:38:54,058 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T22:38:54,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741835_1011 (size=405) 2024-11-15T22:38:54,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741835_1011 (size=405) 2024-11-15T22:38:54,067 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 687d743060cca9311035557cb4f2550f, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d 2024-11-15T22:38:54,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741836_1012 (size=88) 2024-11-15T22:38:54,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42195 is added to blk_1073741836_1012 (size=88) 2024-11-15T22:38:54,073 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:38:54,073 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 687d743060cca9311035557cb4f2550f, disabling compactions & flushes 2024-11-15T22:38:54,073 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 2024-11-15T22:38:54,073 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 2024-11-15T22:38:54,073 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. after waiting 0 ms 2024-11-15T22:38:54,073 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 2024-11-15T22:38:54,073 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 2024-11-15T22:38:54,073 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 687d743060cca9311035557cb4f2550f: Waiting for close lock at 1731710334073Disabling compacts and flushes for region at 1731710334073Disabling writes for close at 1731710334073Writing region close event to WAL at 1731710334073Closed at 1731710334073 2024-11-15T22:38:54,075 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T22:38:54,075 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731710334075"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731710334075"}]},"ts":"1731710334075"} 2024-11-15T22:38:54,077 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-15T22:38:54,078 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T22:38:54,078 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731710334078"}]},"ts":"1731710334078"} 2024-11-15T22:38:54,081 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-15T22:38:54,081 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=687d743060cca9311035557cb4f2550f, ASSIGN}] 2024-11-15T22:38:54,082 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=687d743060cca9311035557cb4f2550f, ASSIGN 2024-11-15T22:38:54,083 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=687d743060cca9311035557cb4f2550f, ASSIGN; state=OFFLINE, location=e611192d6313,41001,1731710332908; forceNewPlan=false, retain=false 2024-11-15T22:38:54,110 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:54,110 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:38:54,235 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=687d743060cca9311035557cb4f2550f, regionState=OPENING, regionLocation=e611192d6313,41001,1731710332908 2024-11-15T22:38:54,240 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=687d743060cca9311035557cb4f2550f, ASSIGN because future has completed 2024-11-15T22:38:54,241 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 687d743060cca9311035557cb4f2550f, server=e611192d6313,41001,1731710332908}] 2024-11-15T22:38:54,404 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 2024-11-15T22:38:54,404 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 687d743060cca9311035557cb4f2550f, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f.', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:38:54,405 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 687d743060cca9311035557cb4f2550f 2024-11-15T22:38:54,405 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:38:54,405 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 687d743060cca9311035557cb4f2550f 2024-11-15T22:38:54,405 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 687d743060cca9311035557cb4f2550f 2024-11-15T22:38:54,408 INFO [StoreOpener-687d743060cca9311035557cb4f2550f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 687d743060cca9311035557cb4f2550f 2024-11-15T22:38:54,410 INFO [StoreOpener-687d743060cca9311035557cb4f2550f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 687d743060cca9311035557cb4f2550f columnFamilyName info 2024-11-15T22:38:54,410 DEBUG [StoreOpener-687d743060cca9311035557cb4f2550f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:38:54,411 INFO [StoreOpener-687d743060cca9311035557cb4f2550f-1 {}] regionserver.HStore(327): Store=687d743060cca9311035557cb4f2550f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:38:54,411 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 687d743060cca9311035557cb4f2550f 2024-11-15T22:38:54,413 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f 2024-11-15T22:38:54,413 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f 2024-11-15T22:38:54,414 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 687d743060cca9311035557cb4f2550f 2024-11-15T22:38:54,414 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 687d743060cca9311035557cb4f2550f 2024-11-15T22:38:54,416 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 687d743060cca9311035557cb4f2550f 2024-11-15T22:38:54,418 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:38:54,419 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 687d743060cca9311035557cb4f2550f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=781356, jitterRate=-0.006454780697822571}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T22:38:54,419 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 687d743060cca9311035557cb4f2550f 2024-11-15T22:38:54,420 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 687d743060cca9311035557cb4f2550f: Running coprocessor pre-open hook at 1731710334405Writing region info on filesystem at 1731710334405Initializing all the Stores 
at 1731710334407 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710334407Cleaning up temporary data from old regions at 1731710334414 (+7 ms)Running coprocessor post-open hooks at 1731710334419 (+5 ms)Region opened successfully at 1731710334420 (+1 ms) 2024-11-15T22:38:54,421 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f., pid=6, masterSystemTime=1731710334395 2024-11-15T22:38:54,423 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 2024-11-15T22:38:54,423 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 2024-11-15T22:38:54,424 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=687d743060cca9311035557cb4f2550f, regionState=OPEN, openSeqNum=2, regionLocation=e611192d6313,41001,1731710332908 2024-11-15T22:38:54,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 687d743060cca9311035557cb4f2550f, server=e611192d6313,41001,1731710332908 because future has completed 2024-11-15T22:38:54,430 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T22:38:54,430 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 687d743060cca9311035557cb4f2550f, server=e611192d6313,41001,1731710332908 in 187 msec 2024-11-15T22:38:54,433 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T22:38:54,433 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=687d743060cca9311035557cb4f2550f, ASSIGN in 349 msec 2024-11-15T22:38:54,434 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T22:38:54,434 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731710334434"}]},"ts":"1731710334434"} 2024-11-15T22:38:54,436 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-15T22:38:54,437 INFO [PEWorker-2 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T22:38:54,439 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 384 msec 2024-11-15T22:38:55,111 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:55,111 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:56,112 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:56,112 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:57,113 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:57,113 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:58,115 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:58,115 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:58,402 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T22:38:58,402 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T22:38:58,404 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T22:38:58,404 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-15T22:38:58,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T22:38:58,405 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-15T22:38:58,406 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T22:38:58,406 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-15T22:38:59,116 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:59,116 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:38:59,415 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T22:38:59,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:59,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:59,419 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:59,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:59,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:59,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:59,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:59,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:59,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:59,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:59,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:59,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:59,449 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:59,449 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:59,449 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:59,451 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:38:59,456 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T22:38:59,457 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-15T22:39:00,118 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:00,118 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:01,119 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:01,119 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:02,121 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:02,121 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:03,122 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:03,122 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:04,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T22:39:04,090 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-15T22:39:04,090 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-15T22:39:04,096 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T22:39:04,096 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 
2024-11-15T22:39:04,101 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f., hostname=e611192d6313,41001,1731710332908, seqNum=2] 2024-11-15T22:39:04,109 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T22:39:04,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T22:39:04,114 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-15T22:39:04,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-15T22:39:04,115 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-15T22:39:04,116 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-15T22:39:04,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:04,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:04,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41001 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-15T22:39:04,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 2024-11-15T22:39:04,281 INFO [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 687d743060cca9311035557cb4f2550f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-15T22:39:04,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/77222448777e4ac29597b92f1dcad130 is 1080, key is row0001/info:/1731710344102/Put/seqid=0 2024-11-15T22:39:04,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741837_1013 (size=6033) 2024-11-15T22:39:04,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741837_1013 (size=6033) 2024-11-15T22:39:04,301 INFO [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/77222448777e4ac29597b92f1dcad130 2024-11-15T22:39:04,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/77222448777e4ac29597b92f1dcad130 as hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/77222448777e4ac29597b92f1dcad130 2024-11-15T22:39:04,313 INFO [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/77222448777e4ac29597b92f1dcad130, entries=1, sequenceid=5, filesize=5.9 K 2024-11-15T22:39:04,315 INFO [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 687d743060cca9311035557cb4f2550f in 33ms, sequenceid=5, compaction requested=false 2024-11-15T22:39:04,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush 
status journal for 687d743060cca9311035557cb4f2550f: 2024-11-15T22:39:04,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 2024-11-15T22:39:04,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-15T22:39:04,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-15T22:39:04,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-15T22:39:04,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 203 msec 2024-11-15T22:39:04,324 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 212 msec 2024-11-15T22:39:05,124 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:05,124 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:06,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:06,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:07,127 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:07,127 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:08,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:08,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:09,130 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:09,130 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:10,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:10,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:11,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:11,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:12,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:12,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:13,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:13,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:14,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:14,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:14,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 after 68095ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:39:14,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta after 68084ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T22:39:14,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-15T22:39:14,201 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-15T22:39:14,209 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T22:39:14,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T22:39:14,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-15T22:39:14,212 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-15T22:39:14,213 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-15T22:39:14,213 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-15T22:39:14,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41001 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-15T22:39:14,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 
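Note on the repeated "Failed invocation" warnings above: RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed reflectively while waiting for WAL lease recovery, and because the underlying DFSClient has already been shut down, each probe surfaces IOException("Filesystem closed") wrapped in an InvocationTargetException and is retried roughly once per second. The following is only a minimal, hypothetical sketch of that reflective-probe pattern, not the actual HBase code; the class and method names (IsFileClosedProbe, probeIsFileClosed) are illustrative.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  // Illustrative sketch: reflectively ask the FileSystem whether a file's lease is closed.
  static boolean probeIsFileClosed(FileSystem fs, Path path) {
    try {
      // DistributedFileSystem exposes isFileClosed(Path); look it up reflectively so the
      // probe also degrades gracefully on FileSystem implementations without the method.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      // No isFileClosed() on this FileSystem; the caller falls back to plain recoverLease retries.
      return false;
    } catch (IllegalAccessException | InvocationTargetException e) {
      // The real failure (here IOException: Filesystem closed) arrives wrapped as the cause.
      System.err.println("Failed invocation for " + path + ": " + e.getCause());
      return false;
    }
  }
}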
2024-11-15T22:39:14,368 INFO [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 687d743060cca9311035557cb4f2550f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-15T22:39:14,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/08cca41dc81c4ff896a5f55f9939c0b4 is 1080, key is row0002/info:/1731710354204/Put/seqid=0 2024-11-15T22:39:14,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741838_1014 (size=6033) 2024-11-15T22:39:14,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741838_1014 (size=6033) 2024-11-15T22:39:14,388 INFO [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/08cca41dc81c4ff896a5f55f9939c0b4 2024-11-15T22:39:14,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/08cca41dc81c4ff896a5f55f9939c0b4 as hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/08cca41dc81c4ff896a5f55f9939c0b4 2024-11-15T22:39:14,401 INFO [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/08cca41dc81c4ff896a5f55f9939c0b4, entries=1, sequenceid=9, filesize=5.9 K 2024-11-15T22:39:14,402 INFO [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 687d743060cca9311035557cb4f2550f in 34ms, sequenceid=9, compaction requested=false 2024-11-15T22:39:14,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 687d743060cca9311035557cb4f2550f: 2024-11-15T22:39:14,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 
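For context on the FLUSH records above (pid=9 FlushTableProcedure, its pid=10 FlushRegionProcedure, and the memstore flush into an HFile): this is the ordinary table flush exposed through the HBase Admin API, as the RawAsyncHBaseAdmin "Operation: FLUSH ... completed" line shows. Below is a minimal sketch of how a client or test can request such a flush; the class name FlushTableExample is hypothetical and the configuration is assumed to already point at the running test cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; the master stores a
      // FlushTableProcedure with one FlushRegionProcedure per region, as in the log above.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}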
2024-11-15T22:39:14,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-15T22:39:14,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-15T22:39:14,406 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-15T22:39:14,406 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 191 msec 2024-11-15T22:39:14,408 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 198 msec 2024-11-15T22:39:15,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:15,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:16,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:16,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:17,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:17,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:18,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:18,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:19,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:19,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:20,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:20,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:21,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:21,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:22,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:22,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:22,731 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T22:39:23,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:23,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:24,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:24,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:24,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-15T22:39:24,239 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-15T22:39:24,243 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C41001%2C1731710332908.1731710364243 2024-11-15T22:39:24,251 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:24,252 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:24,252 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:24,252 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:24,252 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:24,252 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/WALs/e611192d6313,41001,1731710332908/e611192d6313%2C41001%2C1731710332908.1731710333528 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/WALs/e611192d6313,41001,1731710332908/e611192d6313%2C41001%2C1731710332908.1731710364243 2024-11-15T22:39:24,254 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46671:46671),(127.0.0.1/127.0.0.1:37951:37951)] 2024-11-15T22:39:24,254 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/WALs/e611192d6313,41001,1731710332908/e611192d6313%2C41001%2C1731710332908.1731710333528 is not closed yet, will try archiving it next time 2024-11-15T22:39:24,256 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T22:39:24,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741833_1009 (size=5546) 2024-11-15T22:39:24,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741833_1009 (size=5546) 2024-11-15T22:39:24,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T22:39:24,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-15T22:39:24,259 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-15T22:39:24,260 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-11-15T22:39:24,260 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-15T22:39:24,416 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41001 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-15T22:39:24,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 2024-11-15T22:39:24,418 INFO [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 687d743060cca9311035557cb4f2550f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-15T22:39:24,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/b02f18a6c8884f29ac21d24ca05c285e is 1080, key is row0003/info:/1731710364241/Put/seqid=0 2024-11-15T22:39:24,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741840_1016 (size=6033) 2024-11-15T22:39:24,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741840_1016 (size=6033) 2024-11-15T22:39:24,435 INFO [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/b02f18a6c8884f29ac21d24ca05c285e 2024-11-15T22:39:24,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/b02f18a6c8884f29ac21d24ca05c285e as hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/b02f18a6c8884f29ac21d24ca05c285e 2024-11-15T22:39:24,448 INFO [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/b02f18a6c8884f29ac21d24ca05c285e, entries=1, sequenceid=13, filesize=5.9 K 2024-11-15T22:39:24,449 INFO [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 
687d743060cca9311035557cb4f2550f in 32ms, sequenceid=13, compaction requested=true 2024-11-15T22:39:24,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 687d743060cca9311035557cb4f2550f: 2024-11-15T22:39:24,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 2024-11-15T22:39:24,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-15T22:39:24,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-15T22:39:24,453 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-15T22:39:24,453 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 190 msec 2024-11-15T22:39:24,455 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 198 msec 2024-11-15T22:39:25,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:25,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:26,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:26,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:27,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:27,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:28,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:28,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:29,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:29,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:30,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:30,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:31,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:31,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:32,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:32,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:33,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:33,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:33,997 INFO [master/e611192d6313:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-15T22:39:33,997 INFO [master/e611192d6313:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-15T22:39:34,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:34,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-15T22:39:34,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-15T22:39:34,271 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-15T22:39:34,271 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-15T22:39:34,275 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-15T22:39:34,275 DEBUG [Time-limited test {}] regionserver.HStore(1541): 687d743060cca9311035557cb4f2550f/info is initiating minor compaction (all files)
2024-11-15T22:39:34,276 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-15T22:39:34,276 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-15T22:39:34,276 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 687d743060cca9311035557cb4f2550f/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f.
2024-11-15T22:39:34,277 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/77222448777e4ac29597b92f1dcad130, hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/08cca41dc81c4ff896a5f55f9939c0b4, hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/b02f18a6c8884f29ac21d24ca05c285e] into tmpdir=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp, totalSize=17.7 K
2024-11-15T22:39:34,278 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 77222448777e4ac29597b92f1dcad130, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731710344102
2024-11-15T22:39:34,279 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 08cca41dc81c4ff896a5f55f9939c0b4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731710354204
2024-11-15T22:39:34,280 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting b02f18a6c8884f29ac21d24ca05c285e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731710364241
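The three store files above (77222448777e4ac29597b92f1dcad130, 08cca41dc81c4ff896a5f55f9939c0b4, b02f18a6c8884f29ac21d24ca05c285e) are selected by ExploringCompactionPolicy and compacted by the Time-limited test thread directly against the region. Purely as an illustrative sketch (hypothetical class name, assumes an hbase-site.xml on the classpath pointing at the cluster, and is not how this test drives the compaction), the same kind of minor compaction of the info family can be requested from a client through the Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

// Queue a compaction of the 'info' family and wait for it to drain,
// mirroring the "initiating minor compaction (all files)" entry above.
public class CompactInfoFamily {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumption: hbase-site.xml is on the classpath
    TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.compact(table, Bytes.toBytes("info"));            // ask the region server to compact the family
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(100);                                     // poll until no compaction is running
      }
    }
  }
}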
2024-11-15T22:39:34,295 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 687d743060cca9311035557cb4f2550f#info#compaction#46 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-15T22:39:34,296 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/d986593b5eb0412fa304b0838cfbc6e7 is 1080, key is row0001/info:/1731710344102/Put/seqid=0
2024-11-15T22:39:34,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741841_1017 (size=8296)
2024-11-15T22:39:34,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741841_1017 (size=8296)
2024-11-15T22:39:34,308 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/d986593b5eb0412fa304b0838cfbc6e7 as hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/d986593b5eb0412fa304b0838cfbc6e7
2024-11-15T22:39:34,314 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 687d743060cca9311035557cb4f2550f/info of 687d743060cca9311035557cb4f2550f into d986593b5eb0412fa304b0838cfbc6e7(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-15T22:39:34,314 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 687d743060cca9311035557cb4f2550f:
2024-11-15T22:39:34,317 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C41001%2C1731710332908.1731710374316
2024-11-15T22:39:34,323 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T22:39:34,323 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T22:39:34,323 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T22:39:34,323 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T22:39:34,323 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T22:39:34,323 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/WALs/e611192d6313,41001,1731710332908/e611192d6313%2C41001%2C1731710332908.1731710364243 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/WALs/e611192d6313,41001,1731710332908/e611192d6313%2C41001%2C1731710332908.1731710374316
2024-11-15T22:39:34,324 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46671:46671),(127.0.0.1/127.0.0.1:37951:37951)]
2024-11-15T22:39:34,324 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/WALs/e611192d6313,41001,1731710332908/e611192d6313%2C41001%2C1731710332908.1731710364243 is not closed yet, will try archiving it next time
2024-11-15T22:39:34,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741839_1015 (size=2520)
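The Rolled WAL entry above comes from the test thread itself: the FSHLog sync runners are interrupted, a new writer pipeline is opened, and the previous WAL file is left for later archiving. As a hedged aside (hypothetical class name, same hbase-site.xml assumption, and not what this test does), a client can ask that same region server, e611192d6313,41001,1731710332908, to roll its WAL through the Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Ask one region server to close its current WAL and start a new one,
// as in the "Rolled WAL ... new WAL ..." entry above.
public class RollWalExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumption: hbase-site.xml is on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // ServerName uses the host,port,startcode form that appears in the log
      admin.rollWALWriter(ServerName.valueOf("e611192d6313,41001,1731710332908"));
    }
  }
}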
2024-11-15T22:39:34,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741839_1015 (size=2520)
2024-11-15T22:39:34,326 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/WALs/e611192d6313,41001,1731710332908/e611192d6313%2C41001%2C1731710332908.1731710333528 to hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/oldWALs/e611192d6313%2C41001%2C1731710332908.1731710333528
2024-11-15T22:39:34,327 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-15T22:39:34,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-15T22:39:34,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-15T22:39:34,329 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-15T22:39:34,330 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-15T22:39:34,330 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-15T22:39:34,483 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41001 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-15T22:39:34,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f.
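The flush requested above by Client=jenkins//172.17.0.3 runs on the master as FlushTableProcedure pid=13, which fans out FlushRegionProcedure pid=14 to the region server, where FlushRegionCallable executes it. From a client the whole sequence is a single Admin call; a minimal sketch, assuming hbase-site.xml is on the classpath (the class name is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Ask the master to flush every region of the table, as the jenkins client
// does in the HMaster entry above.
public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumption: hbase-site.xml is on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}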
2024-11-15T22:39:34,484 INFO [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 687d743060cca9311035557cb4f2550f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-15T22:39:34,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/f19198d81dbe4ed4b09dae15e3f427d0 is 1080, key is row0000/info:/1731710374315/Put/seqid=0
2024-11-15T22:39:34,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741843_1019 (size=6033)
2024-11-15T22:39:34,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741843_1019 (size=6033)
2024-11-15T22:39:34,500 INFO [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/f19198d81dbe4ed4b09dae15e3f427d0
2024-11-15T22:39:34,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/f19198d81dbe4ed4b09dae15e3f427d0 as hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/f19198d81dbe4ed4b09dae15e3f427d0
2024-11-15T22:39:34,514 INFO [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/f19198d81dbe4ed4b09dae15e3f427d0, entries=1, sequenceid=18, filesize=5.9 K
2024-11-15T22:39:34,515 INFO [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 687d743060cca9311035557cb4f2550f in 31ms, sequenceid=18, compaction requested=false
2024-11-15T22:39:34,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 687d743060cca9311035557cb4f2550f:
2024-11-15T22:39:34,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f.
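While this flush completes normally, the Close-WAL-Writer-0 thread keeps emitting the WARN that dominates this section: RecoverLeaseFSUtils invokes DistributedFileSystem.isFileClosed through reflection, the DFSClient behind hdfs://localhost:41079 has already been closed, so each call fails with IOException("Filesystem closed"), and reflection reports that as InvocationTargetException with the real error in getCause(). The following is a standalone sketch of such a reflective probe (hypothetical class name and default path; it is not the HBase utility itself):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Probe isFileClosed(Path) reflectively. When the FileSystem's underlying
// DFSClient is already closed, the call surfaces as InvocationTargetException
// wrapping IOException("Filesystem closed"), exactly as in the WARN entries here.
public class IsFileClosedProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical default path; in the log it is a WAL file under /user/jenkins/test-data/...
    Path wal = new Path(args.length > 0 ? args[0] : "/tmp/example-wal");
    FileSystem fs = wal.getFileSystem(conf);
    try {
      // Only some FileSystem implementations expose isFileClosed; DistributedFileSystem does.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      boolean closed = (boolean) isFileClosed.invoke(fs, wal);
      System.out.println("isFileClosed=" + closed);
    } catch (NoSuchMethodException e) {
      System.out.println("This FileSystem does not expose isFileClosed(Path)");
    } catch (InvocationTargetException e) {
      // The wrapped cause is the real failure, e.g. IOException: Filesystem closed.
      System.out.println("isFileClosed failed: " + e.getCause());
    }
  }
}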
2024-11-15T22:39:34,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-15T22:39:34,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-15T22:39:34,519 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-15T22:39:34,519 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 187 msec 2024-11-15T22:39:34,521 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 193 msec 2024-11-15T22:39:35,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:35,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:36,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:36,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:37,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:37,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:38,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:38,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:39,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:39,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:39,405 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 687d743060cca9311035557cb4f2550f, had cached 0 bytes from a total of 14329 2024-11-15T22:39:40,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:40,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:41,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:41,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:42,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:42,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:43,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:43,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:44,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:44,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:44,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-15T22:39:44,429 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-15T22:39:44,432 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C41001%2C1731710332908.1731710384432 2024-11-15T22:39:44,441 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:44,441 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:44,442 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:44,442 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:44,442 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:44,442 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/WALs/e611192d6313,41001,1731710332908/e611192d6313%2C41001%2C1731710332908.1731710374316 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/WALs/e611192d6313,41001,1731710332908/e611192d6313%2C41001%2C1731710332908.1731710384432 2024-11-15T22:39:44,443 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37951:37951),(127.0.0.1/127.0.0.1:46671:46671)] 2024-11-15T22:39:44,443 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/WALs/e611192d6313,41001,1731710332908/e611192d6313%2C41001%2C1731710332908.1731710374316 is not closed yet, will try archiving it next time 2024-11-15T22:39:44,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741842_1018 (size=2026) 2024-11-15T22:39:44,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T22:39:44,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741842_1018 (size=2026) 2024-11-15T22:39:44,444 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
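[Annotation] The repeating WARNs above come from a once-per-second retry loop: RecoverLeaseFSUtils probes isFileClosed on the WAL file via reflection, and because the underlying DFSClient has already been shut down, every probe surfaces as an InvocationTargetException wrapping "java.io.IOException: Filesystem closed". The snippet below is a minimal, hypothetical sketch of that kind of reflective probe; the class and method here are illustrative, not the actual RecoverLeaseFSUtils code.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  // Returns TRUE/FALSE when the probe can be answered, null otherwise.
  static Boolean probe(FileSystem fs, Path wal) {
    try {
      // DistributedFileSystem exposes isFileClosed(Path); look it up reflectively
      // so callers also work against FileSystem implementations without it.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, wal);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return null; // probe unsupported on this FileSystem
    } catch (InvocationTargetException e) {
      // Method.invoke wraps whatever the target threw; the real failure is the
      // cause, e.g. "Filesystem closed" once the DFSClient has been shut down,
      // which is exactly what the WARNs above show on every retry.
      System.err.println("isFileClosed probe failed: " + e.getCause());
      return null;
    }
  }
}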
2024-11-15T22:39:44,444 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/WALs/e611192d6313,41001,1731710332908/e611192d6313%2C41001%2C1731710332908.1731710364243 to hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/oldWALs/e611192d6313%2C41001%2C1731710332908.1731710364243 2024-11-15T22:39:44,444 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:39:44,444 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:39:44,444 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:39:44,444 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T22:39:44,444 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=341958511, stopped=false 2024-11-15T22:39:44,444 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e611192d6313,37993,1731710332748 2024-11-15T22:39:44,445 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T22:39:44,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T22:39:44,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T22:39:44,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:39:44,516 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T22:39:44,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:39:44,516 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
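[Annotation] The ZKWatcher lines above record the cluster-wide shutdown signal: deleting the /hbase/running znode fires NodeDeleted on every watcher, after which ZKUtil immediately re-arms a watch even though the node no longer exists. A minimal, hypothetical sketch of that watch pattern follows; it is not HBase's ZKWatcher code, and note that a watch set this way is one-shot, so real code re-registers it after each event.

import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

final class RunningNodeWatchSketch {
  static void watchRunning(ZooKeeper zk, Runnable onShutdown) throws Exception {
    Watcher watcher = event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        onShutdown.run(); // e.g. begin region server / master shutdown
      }
    };
    // exists() answers "is the node there?" and arms a one-shot watch even when
    // the node is absent -- hence "Set watcher on znode that does not yet exist".
    zk.exists("/hbase/running", watcher);
  }
}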
2024-11-15T22:39:44,517 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:39:44,517 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:39:44,517 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:39:44,517 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:39:44,518 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e611192d6313,41001,1731710332908' ***** 2024-11-15T22:39:44,518 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T22:39:44,519 INFO [RS:0;e611192d6313:41001 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T22:39:44,519 INFO [RS:0;e611192d6313:41001 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T22:39:44,519 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T22:39:44,519 INFO [RS:0;e611192d6313:41001 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T22:39:44,519 INFO [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer(3091): Received CLOSE for 687d743060cca9311035557cb4f2550f 2024-11-15T22:39:44,520 INFO [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer(959): stopping server e611192d6313,41001,1731710332908 2024-11-15T22:39:44,520 INFO [RS:0;e611192d6313:41001 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T22:39:44,520 INFO [RS:0;e611192d6313:41001 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e611192d6313:41001. 2024-11-15T22:39:44,520 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 687d743060cca9311035557cb4f2550f, disabling compactions & flushes 2024-11-15T22:39:44,520 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 
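[Annotation] The two AsyncConnectionImpl call stacks above both bottom out in AbstractTestLogRolling.tearDown, i.e. the JUnit @After hook driving HBaseTestingUtil.shutdownMiniCluster(). A condensed, hypothetical sketch of that tear-down path, under the assumption of a single shared testing-util instance:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public abstract class LogRollingTearDownSketch {
  // Shared mini-cluster handle, as the log-rolling tests appear to use one.
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Closes the async connection, stops master and region servers, then the
    // embedded DFS/ZooKeeper -- the sequence the DEBUG/INFO lines above trace.
    TEST_UTIL.shutdownMiniCluster();
  }
}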
2024-11-15T22:39:44,520 DEBUG [RS:0;e611192d6313:41001 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:39:44,520 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 2024-11-15T22:39:44,520 DEBUG [RS:0;e611192d6313:41001 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:39:44,520 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. after waiting 0 ms 2024-11-15T22:39:44,520 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 2024-11-15T22:39:44,520 INFO [RS:0;e611192d6313:41001 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T22:39:44,520 INFO [RS:0;e611192d6313:41001 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T22:39:44,520 INFO [RS:0;e611192d6313:41001 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-15T22:39:44,520 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 687d743060cca9311035557cb4f2550f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-15T22:39:44,521 INFO [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T22:39:44,521 INFO [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T22:39:44,521 DEBUG [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 687d743060cca9311035557cb4f2550f=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f.} 2024-11-15T22:39:44,521 DEBUG [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 687d743060cca9311035557cb4f2550f 2024-11-15T22:39:44,521 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T22:39:44,521 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T22:39:44,521 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T22:39:44,521 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T22:39:44,521 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T22:39:44,522 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-15T22:39:44,526 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/69d0a56f171f4d5287c17a3d81a2f245 is 1080, key is row0001/info:/1731710384430/Put/seqid=0 2024-11-15T22:39:44,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741845_1021 (size=6033) 2024-11-15T22:39:44,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741845_1021 (size=6033) 2024-11-15T22:39:44,533 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/69d0a56f171f4d5287c17a3d81a2f245 2024-11-15T22:39:44,539 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/.tmp/info/69d0a56f171f4d5287c17a3d81a2f245 as hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/69d0a56f171f4d5287c17a3d81a2f245 2024-11-15T22:39:44,542 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/.tmp/info/e2c6350f63f5482db50e26eb4fbab7f0 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f./info:regioninfo/1731710334424/Put/seqid=0 2024-11-15T22:39:44,545 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/69d0a56f171f4d5287c17a3d81a2f245, entries=1, sequenceid=22, filesize=5.9 K 2024-11-15T22:39:44,546 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 687d743060cca9311035557cb4f2550f in 26ms, sequenceid=22, compaction requested=true 2024-11-15T22:39:44,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741846_1022 (size=7308) 2024-11-15T22:39:44,546 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/77222448777e4ac29597b92f1dcad130, hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/08cca41dc81c4ff896a5f55f9939c0b4, hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/b02f18a6c8884f29ac21d24ca05c285e] to archive 2024-11-15T22:39:44,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741846_1022 (size=7308) 2024-11-15T22:39:44,547 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/.tmp/info/e2c6350f63f5482db50e26eb4fbab7f0 2024-11-15T22:39:44,547 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
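[Annotation] The flush sequence above follows a write-to-.tmp-then-commit pattern: the memstore is flushed to a file under the region's .tmp directory and only then "committed" by moving it into the column-family directory, so readers never observe a half-written HFile. The following is a hypothetical illustration of that commit step; the helper and paths are placeholders, not HBase's HRegionFileSystem API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class CommitFlushedFileSketch {
  // Move a freshly flushed HFile from the region's .tmp area into the column
  // family directory; until the rename succeeds, the new file is invisible.
  static Path commit(Configuration conf, Path tmpFile, Path familyDir) throws Exception {
    FileSystem fs = tmpFile.getFileSystem(conf);
    Path dst = new Path(familyDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dst)) {
      throw new java.io.IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }
}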
2024-11-15T22:39:44,549 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/77222448777e4ac29597b92f1dcad130 to hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/77222448777e4ac29597b92f1dcad130 2024-11-15T22:39:44,550 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/08cca41dc81c4ff896a5f55f9939c0b4 to hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/08cca41dc81c4ff896a5f55f9939c0b4 2024-11-15T22:39:44,551 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/b02f18a6c8884f29ac21d24ca05c285e to hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/info/b02f18a6c8884f29ac21d24ca05c285e 2024-11-15T22:39:44,551 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e611192d6313:37993 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-15T22:39:44,551 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [77222448777e4ac29597b92f1dcad130=6033, 08cca41dc81c4ff896a5f55f9939c0b4=6033, b02f18a6c8884f29ac21d24ca05c285e=6033] 2024-11-15T22:39:44,555 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/687d743060cca9311035557cb4f2550f/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-15T22:39:44,556 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 2024-11-15T22:39:44,556 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 687d743060cca9311035557cb4f2550f: Waiting for close lock at 1731710384520Running coprocessor pre-close hooks at 1731710384520Disabling compacts and flushes for region at 1731710384520Disabling writes for close at 1731710384520Obtaining lock to block concurrent updates at 1731710384521 (+1 ms)Preparing flush snapshotting stores in 687d743060cca9311035557cb4f2550f at 1731710384521Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731710384521Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. at 1731710384522 (+1 ms)Flushing 687d743060cca9311035557cb4f2550f/info: creating writer at 1731710384522Flushing 687d743060cca9311035557cb4f2550f/info: appending metadata at 1731710384525 (+3 ms)Flushing 687d743060cca9311035557cb4f2550f/info: closing flushed file at 1731710384525Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@426aedf3: reopening flushed file at 1731710384538 (+13 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 687d743060cca9311035557cb4f2550f in 26ms, sequenceid=22, compaction requested=true at 1731710384546 (+8 ms)Writing region close event to WAL at 1731710384552 (+6 ms)Running coprocessor post-close hooks at 1731710384555 (+3 ms)Closed at 1731710384556 (+1 ms) 2024-11-15T22:39:44,556 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731710334053.687d743060cca9311035557cb4f2550f. 
2024-11-15T22:39:44,567 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/.tmp/ns/605273977c4d479fb3bfa5d92546cef5 is 43, key is default/ns:d/1731710333981/Put/seqid=0 2024-11-15T22:39:44,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741847_1023 (size=5153) 2024-11-15T22:39:44,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741847_1023 (size=5153) 2024-11-15T22:39:44,572 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/.tmp/ns/605273977c4d479fb3bfa5d92546cef5 2024-11-15T22:39:44,591 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/.tmp/table/028d2e581a2a42adb050bc0d504e769f is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731710334434/Put/seqid=0 2024-11-15T22:39:44,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741848_1024 (size=5508) 2024-11-15T22:39:44,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741848_1024 (size=5508) 2024-11-15T22:39:44,596 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/.tmp/table/028d2e581a2a42adb050bc0d504e769f 2024-11-15T22:39:44,601 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/.tmp/info/e2c6350f63f5482db50e26eb4fbab7f0 as hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/info/e2c6350f63f5482db50e26eb4fbab7f0 2024-11-15T22:39:44,606 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/info/e2c6350f63f5482db50e26eb4fbab7f0, entries=10, sequenceid=11, filesize=7.1 K 2024-11-15T22:39:44,607 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/.tmp/ns/605273977c4d479fb3bfa5d92546cef5 as hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/ns/605273977c4d479fb3bfa5d92546cef5 2024-11-15T22:39:44,613 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/ns/605273977c4d479fb3bfa5d92546cef5, entries=2, sequenceid=11, filesize=5.0 K 2024-11-15T22:39:44,614 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/.tmp/table/028d2e581a2a42adb050bc0d504e769f as hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/table/028d2e581a2a42adb050bc0d504e769f 2024-11-15T22:39:44,620 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/table/028d2e581a2a42adb050bc0d504e769f, entries=2, sequenceid=11, filesize=5.4 K 2024-11-15T22:39:44,621 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false 2024-11-15T22:39:44,626 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-15T22:39:44,626 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T22:39:44,626 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T22:39:44,626 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731710384521Running coprocessor pre-close hooks at 1731710384521Disabling compacts and flushes for region at 1731710384521Disabling writes for close at 1731710384521Obtaining lock to block concurrent updates at 1731710384522 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731710384522Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731710384522Flushing stores of hbase:meta,,1.1588230740 at 1731710384523 (+1 ms)Flushing 1588230740/info: creating writer at 1731710384523Flushing 1588230740/info: appending metadata at 1731710384541 (+18 ms)Flushing 1588230740/info: closing flushed file at 1731710384541Flushing 1588230740/ns: creating writer at 1731710384553 (+12 ms)Flushing 1588230740/ns: appending metadata at 1731710384567 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731710384567Flushing 1588230740/table: creating writer at 1731710384576 (+9 ms)Flushing 1588230740/table: appending metadata at 1731710384591 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731710384591Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e8df8cd: reopening flushed file at 1731710384601 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c0e0e99: reopening flushed file at 1731710384607 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@493c7c0d: reopening flushed file at 1731710384613 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false at 1731710384621 (+8 ms)Writing region close event to WAL at 1731710384622 (+1 ms)Running coprocessor post-close hooks at 1731710384626 (+4 ms)Closed at 1731710384626 2024-11-15T22:39:44,626 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T22:39:44,721 INFO [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer(976): stopping server e611192d6313,41001,1731710332908; all regions closed. 2024-11-15T22:39:44,722 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:44,722 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:44,722 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:44,722 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:44,722 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:44,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741834_1010 (size=3306) 2024-11-15T22:39:44,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741834_1010 (size=3306) 2024-11-15T22:39:44,727 DEBUG [RS:0;e611192d6313:41001 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/oldWALs 2024-11-15T22:39:44,727 INFO [RS:0;e611192d6313:41001 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e611192d6313%2C41001%2C1731710332908.meta:.meta(num 1731710333894) 2024-11-15T22:39:44,728 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:44,728 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:44,728 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:44,728 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:44,728 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:44,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741844_1020 (size=1252) 2024-11-15T22:39:44,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741844_1020 (size=1252) 2024-11-15T22:39:44,734 DEBUG [RS:0;e611192d6313:41001 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/oldWALs 2024-11-15T22:39:44,734 INFO [RS:0;e611192d6313:41001 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e611192d6313%2C41001%2C1731710332908:(num 1731710384432) 2024-11-15T22:39:44,734 DEBUG [RS:0;e611192d6313:41001 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:39:44,735 INFO [RS:0;e611192d6313:41001 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T22:39:44,735 INFO [RS:0;e611192d6313:41001 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T22:39:44,735 INFO [RS:0;e611192d6313:41001 {}] hbase.ChoreService(370): Chore service for: regionserver/e611192d6313:0 had 
[ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-15T22:39:44,735 INFO [RS:0;e611192d6313:41001 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T22:39:44,735 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T22:39:44,735 INFO [RS:0;e611192d6313:41001 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41001 2024-11-15T22:39:44,771 INFO [RS:0;e611192d6313:41001 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T22:39:44,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T22:39:44,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e611192d6313,41001,1731710332908 2024-11-15T22:39:44,784 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e611192d6313,41001,1731710332908] 2024-11-15T22:39:44,794 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e611192d6313,41001,1731710332908 already deleted, retry=false 2024-11-15T22:39:44,794 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e611192d6313,41001,1731710332908 expired; onlineServers=0 2024-11-15T22:39:44,794 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e611192d6313,37993,1731710332748' ***** 2024-11-15T22:39:44,794 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T22:39:44,794 INFO [M:0;e611192d6313:37993 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T22:39:44,794 INFO [M:0;e611192d6313:37993 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T22:39:44,794 DEBUG [M:0;e611192d6313:37993 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T22:39:44,794 DEBUG [M:0;e611192d6313:37993 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T22:39:44,795 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T22:39:44,795 DEBUG [master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710333273 {}] cleaner.HFileCleaner(306): Exit Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710333273,5,FailOnTimeoutGroup] 2024-11-15T22:39:44,795 DEBUG [master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710333273 {}] cleaner.HFileCleaner(306): Exit Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710333273,5,FailOnTimeoutGroup] 2024-11-15T22:39:44,795 INFO [M:0;e611192d6313:37993 {}] hbase.ChoreService(370): Chore service for: master/e611192d6313:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T22:39:44,795 INFO [M:0;e611192d6313:37993 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T22:39:44,795 DEBUG [M:0;e611192d6313:37993 {}] master.HMaster(1795): Stopping service threads 2024-11-15T22:39:44,795 INFO [M:0;e611192d6313:37993 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T22:39:44,795 INFO [M:0;e611192d6313:37993 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T22:39:44,795 INFO [M:0;e611192d6313:37993 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T22:39:44,795 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T22:39:44,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T22:39:44,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:39:44,805 DEBUG [M:0;e611192d6313:37993 {}] zookeeper.ZKUtil(347): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T22:39:44,805 WARN [M:0;e611192d6313:37993 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T22:39:44,805 INFO [M:0;e611192d6313:37993 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/.lastflushedseqids 2024-11-15T22:39:44,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741849_1025 (size=130) 2024-11-15T22:39:44,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741849_1025 (size=130) 2024-11-15T22:39:44,811 INFO [M:0;e611192d6313:37993 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T22:39:44,811 INFO [M:0;e611192d6313:37993 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T22:39:44,811 DEBUG [M:0;e611192d6313:37993 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T22:39:44,811 INFO [M:0;e611192d6313:37993 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:39:44,811 DEBUG [M:0;e611192d6313:37993 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:39:44,811 DEBUG [M:0;e611192d6313:37993 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T22:39:44,811 DEBUG [M:0;e611192d6313:37993 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:39:44,811 INFO [M:0;e611192d6313:37993 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.55 KB heapSize=54.94 KB 2024-11-15T22:39:44,825 DEBUG [M:0;e611192d6313:37993 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/75faa98086e24780b75444d5f1ba975d is 82, key is hbase:meta,,1/info:regioninfo/1731710333925/Put/seqid=0 2024-11-15T22:39:44,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741850_1026 (size=5672) 2024-11-15T22:39:44,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741850_1026 (size=5672) 2024-11-15T22:39:44,830 INFO [M:0;e611192d6313:37993 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/75faa98086e24780b75444d5f1ba975d 2024-11-15T22:39:44,855 DEBUG [M:0;e611192d6313:37993 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/11d5987f3c5342a1826cec725d6e78b7 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731710334438/Put/seqid=0 2024-11-15T22:39:44,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741851_1027 (size=7818) 2024-11-15T22:39:44,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741851_1027 (size=7818) 2024-11-15T22:39:44,859 INFO [M:0;e611192d6313:37993 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.95 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/11d5987f3c5342a1826cec725d6e78b7 2024-11-15T22:39:44,865 INFO [M:0;e611192d6313:37993 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 11d5987f3c5342a1826cec725d6e78b7 2024-11-15T22:39:44,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:39:44,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41001-0x10140a6aae80001, quorum=127.0.0.1:59371, baseZNode=/hbase 
Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:39:44,884 INFO [RS:0;e611192d6313:41001 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T22:39:44,884 INFO [RS:0;e611192d6313:41001 {}] regionserver.HRegionServer(1031): Exiting; stopping=e611192d6313,41001,1731710332908; zookeeper connection closed. 2024-11-15T22:39:44,884 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3023458a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3023458a 2024-11-15T22:39:44,884 DEBUG [M:0;e611192d6313:37993 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/47aa107c2a0f4f3aafb8ff355689f859 is 69, key is e611192d6313,41001,1731710332908/rs:state/1731710333369/Put/seqid=0 2024-11-15T22:39:44,884 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T22:39:44,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741852_1028 (size=5156) 2024-11-15T22:39:44,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741852_1028 (size=5156) 2024-11-15T22:39:45,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:45,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:45,291 INFO [M:0;e611192d6313:37993 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/47aa107c2a0f4f3aafb8ff355689f859 2024-11-15T22:39:45,315 DEBUG [M:0;e611192d6313:37993 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cddecdb061994212ba6673b3b3199880 is 52, key is load_balancer_on/state:d/1731710334049/Put/seqid=0 2024-11-15T22:39:45,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741853_1029 (size=5056) 2024-11-15T22:39:45,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741853_1029 (size=5056) 2024-11-15T22:39:45,320 INFO [M:0;e611192d6313:37993 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cddecdb061994212ba6673b3b3199880 2024-11-15T22:39:45,324 DEBUG [M:0;e611192d6313:37993 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/75faa98086e24780b75444d5f1ba975d as hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/75faa98086e24780b75444d5f1ba975d 2024-11-15T22:39:45,329 INFO [M:0;e611192d6313:37993 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/75faa98086e24780b75444d5f1ba975d, entries=8, sequenceid=121, filesize=5.5 K 2024-11-15T22:39:45,330 DEBUG [M:0;e611192d6313:37993 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/11d5987f3c5342a1826cec725d6e78b7 as hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/11d5987f3c5342a1826cec725d6e78b7 2024-11-15T22:39:45,334 INFO [M:0;e611192d6313:37993 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 11d5987f3c5342a1826cec725d6e78b7 2024-11-15T22:39:45,334 INFO [M:0;e611192d6313:37993 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/11d5987f3c5342a1826cec725d6e78b7, entries=14, sequenceid=121, filesize=7.6 K 2024-11-15T22:39:45,335 DEBUG [M:0;e611192d6313:37993 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/47aa107c2a0f4f3aafb8ff355689f859 as 
hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/47aa107c2a0f4f3aafb8ff355689f859 2024-11-15T22:39:45,339 INFO [M:0;e611192d6313:37993 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/47aa107c2a0f4f3aafb8ff355689f859, entries=1, sequenceid=121, filesize=5.0 K 2024-11-15T22:39:45,340 DEBUG [M:0;e611192d6313:37993 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cddecdb061994212ba6673b3b3199880 as hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/cddecdb061994212ba6673b3b3199880 2024-11-15T22:39:45,344 INFO [M:0;e611192d6313:37993 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37049/user/jenkins/test-data/3193f6a2-7be8-a5db-e492-4f9fb312121d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/cddecdb061994212ba6673b3b3199880, entries=1, sequenceid=121, filesize=4.9 K 2024-11-15T22:39:45,345 INFO [M:0;e611192d6313:37993 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.55 KB/44593, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 534ms, sequenceid=121, compaction requested=false 2024-11-15T22:39:45,349 INFO [M:0;e611192d6313:37993 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:39:45,349 DEBUG [M:0;e611192d6313:37993 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731710384811Disabling compacts and flushes for region at 1731710384811Disabling writes for close at 1731710384811Obtaining lock to block concurrent updates at 1731710384811Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731710384811Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44593, getHeapSize=56192, getOffHeapSize=0, getCellsCount=140 at 1731710384812 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731710384812Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731710384812Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731710384825 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731710384825Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731710384835 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731710384854 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731710384854Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731710384865 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731710384883 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731710384883Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731710385301 (+418 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731710385314 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731710385314Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a96d4ea: reopening flushed file at 1731710385324 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e0adc82: reopening flushed file at 1731710385329 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e8944d5: reopening flushed file at 1731710385334 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ec794bb: reopening flushed file at 1731710385339 (+5 ms)Finished flush of dataSize ~43.55 KB/44593, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 534ms, sequenceid=121, compaction requested=false at 1731710385345 (+6 ms)Writing region close event to WAL at 1731710385349 (+4 ms)Closed at 1731710385349 2024-11-15T22:39:45,349 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:45,349 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:45,349 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:45,349 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:45,349 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:39:45,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741830_1006 (size=52990) 2024-11-15T22:39:45,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34321 is added to blk_1073741830_1006 (size=52990) 2024-11-15T22:39:45,352 INFO [M:0;e611192d6313:37993 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T22:39:45,352 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
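The HRegionFileSystem(442) "Committing ... .tmp/... as ..." lines above show the two-step flush pattern: each flushed HFile is first written under the region's .tmp directory and only then moved into the column-family directory, so a half-written file is never visible in the store. The snippet below is a minimal sketch of that commit step using the Hadoop FileSystem API; the class name and paths are hypothetical, not HBase's implementation.

```java
// Minimal sketch of the "write to .tmp, then commit by rename" step seen in the
// HRegionFileSystem log lines above. Not HBase code; class name and paths are made up.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitFlushedFileSketch {

  /** Move a flushed file from the region's .tmp area into its store directory. */
  static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    // On HDFS a rename is a metadata-only operation, so the new HFile appears in the
    // store directory in one step instead of being copied byte by byte.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dst);
    }
    return dst;
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    // Hypothetical paths, shaped like the ones in the log.
    Path tmp = new Path("/hbase/MasterData/data/master/store/region/.tmp/info/abc123");
    Path store = new Path("/hbase/MasterData/data/master/store/region/info");
    System.out.println("Committed to " + commit(fs, tmp, store));
  }
}
```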
2024-11-15T22:39:45,352 INFO [M:0;e611192d6313:37993 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37993 2024-11-15T22:39:45,352 INFO [M:0;e611192d6313:37993 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T22:39:45,394 INFO [regionserver/e611192d6313:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T22:39:45,499 INFO [M:0;e611192d6313:37993 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T22:39:45,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:39:45,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37993-0x10140a6aae80000, quorum=127.0.0.1:59371, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:39:45,502 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5cdf1f1c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:39:45,502 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51e1410e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:39:45,502 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:39:45,502 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41bc1801{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:39:45,502 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5894e22d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/hadoop.log.dir/,STOPPED} 2024-11-15T22:39:45,504 WARN [BP-2142243783-172.17.0.3-1731710330493 heartbeating to localhost/127.0.0.1:37049 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:39:45,504 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T22:39:45,504 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:39:45,504 WARN [BP-2142243783-172.17.0.3-1731710330493 heartbeating to localhost/127.0.0.1:37049 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2142243783-172.17.0.3-1731710330493 (Datanode Uuid 1afffb5e-4f1a-45aa-8f7a-a4f980faf30d) service to localhost/127.0.0.1:37049 2024-11-15T22:39:45,505 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/cluster_199dd95a-bbc3-d47f-4dc5-ea929677a5e4/data/data3/current/BP-2142243783-172.17.0.3-1731710330493 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:39:45,505 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/cluster_199dd95a-bbc3-d47f-4dc5-ea929677a5e4/data/data4/current/BP-2142243783-172.17.0.3-1731710330493 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:39:45,506 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:39:45,508 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2526c219{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:39:45,508 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@74e6f5d9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:39:45,508 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:39:45,509 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bd1d692{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:39:45,509 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10c2896a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/hadoop.log.dir/,STOPPED} 2024-11-15T22:39:45,510 WARN [BP-2142243783-172.17.0.3-1731710330493 heartbeating to localhost/127.0.0.1:37049 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:39:45,510 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
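Each "Stopped o.e.j.w.WebAppContext ..." / "Stopped ServerConnector ..." pair above is the embedded Jetty instance backing a datanode (and, later, the namenode) web UI being shut down as the mini DFS cluster is torn down. The snippet below is only a generic embedded-Jetty start/stop sketch to show where such messages originate; it is not Hadoop's HttpServer2 code, and the class and context path are invented.

```java
// Generic embedded-Jetty lifecycle sketch (not Hadoop's HttpServer2): starting and then
// stopping a Server produces "Started/Stopped ServerConnector" and context handler
// messages similar to the ones in the log above.
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.handler.ContextHandler;

public class JettyLifecycleSketch {
  public static void main(String[] args) throws Exception {
    Server server = new Server(0);                 // port 0: pick an ephemeral port
    ContextHandler logs = new ContextHandler("/logs");
    server.setHandler(logs);

    server.start();                                // logs "Started ServerConnector ..."
    System.out.println("Listening on " + server.getURI());

    server.stop();                                 // logs "Stopped ServerConnector ..."
    server.join();                                 // wait for server threads to exit
  }
}
```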
2024-11-15T22:39:45,510 WARN [BP-2142243783-172.17.0.3-1731710330493 heartbeating to localhost/127.0.0.1:37049 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2142243783-172.17.0.3-1731710330493 (Datanode Uuid 624e525e-2171-49e7-97e6-941e7a16f48a) service to localhost/127.0.0.1:37049 2024-11-15T22:39:45,510 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:39:45,511 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/cluster_199dd95a-bbc3-d47f-4dc5-ea929677a5e4/data/data1/current/BP-2142243783-172.17.0.3-1731710330493 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:39:45,511 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/cluster_199dd95a-bbc3-d47f-4dc5-ea929677a5e4/data/data2/current/BP-2142243783-172.17.0.3-1731710330493 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:39:45,511 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:39:45,518 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4d36967f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T22:39:45,518 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4d389c80{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:39:45,518 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:39:45,518 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b8edabe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:39:45,519 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c41fb6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/hadoop.log.dir/,STOPPED} 2024-11-15T22:39:45,524 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T22:39:45,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T22:39:45,550 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=208 (was 182) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37049 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37049 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37049 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37049 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37049 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37049 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37049 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:37049 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37049 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=86 (was 117), ProcessCount=11 (was 11), AvailableMemoryMB=3925 (was 3996) 2024-11-15T22:39:45,556 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=208, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=86, ProcessCount=11, AvailableMemoryMB=3925 2024-11-15T22:39:45,556 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T22:39:45,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/hadoop.log.dir so I do NOT create it in target/test-data/c5623668-9a91-6762-1fca-76fde62cf043 2024-11-15T22:39:45,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6c6885e4-b4e7-2c78-61ca-ba2674fa1884/hadoop.tmp.dir so I do NOT create it in target/test-data/c5623668-9a91-6762-1fca-76fde62cf043 2024-11-15T22:39:45,557 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/cluster_cab337df-ff59-49d3-a639-80c31a24b544, deleteOnExit=true 2024-11-15T22:39:45,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T22:39:45,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/test.cache.data in system properties and HBase conf 2024-11-15T22:39:45,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T22:39:45,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/hadoop.log.dir in system properties and HBase conf 2024-11-15T22:39:45,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T22:39:45,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/mapreduce.cluster.temp.dir in system properties and HBase conf 
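The "Starting up minicluster with option: StartMiniClusterOption{...}" line above is the test harness spinning up a fresh single-master, single-region-server cluster with two datanodes for the next test case (testLogRolling). A test typically drives this through HBaseTestingUtil, roughly as sketched below; the option values mirror the log output, but the surrounding class is hypothetical and the builder method names are assumed from the printed option fields rather than the actual TestLogRolling source.

```java
// Hypothetical test skeleton showing how a mini cluster like the one in the log is
// usually started and torn down; option values mirror the StartMiniClusterOption
// printed above. This is a sketch, not the actual TestLogRolling test.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)          // one HMaster, as in the log
        .numRegionServers(1)    // one region server
        .numDataNodes(2)        // two HDFS datanodes
        .numZkServers(1)        // one embedded ZooKeeper server
        .build();

    util.startMiniCluster(option);   // boots ZK, HDFS and HBase under target/test-data
    try {
      // ... run test logic against the mini cluster here ...
    } finally {
      util.shutdownMiniCluster();    // produces the "Minicluster is down" line
    }
  }
}
```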
2024-11-15T22:39:45,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T22:39:45,557 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-15T22:39:45,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T22:39:45,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T22:39:45,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T22:39:45,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T22:39:45,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T22:39:45,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T22:39:45,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T22:39:45,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T22:39:45,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T22:39:45,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/nfs.dump.dir in system 
properties and HBase conf 2024-11-15T22:39:45,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/java.io.tmpdir in system properties and HBase conf 2024-11-15T22:39:45,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T22:39:45,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T22:39:45,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T22:39:45,570 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T22:39:46,025 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:39:46,028 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:39:46,029 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:39:46,029 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:39:46,029 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T22:39:46,030 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:39:46,030 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79ecb530{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:39:46,030 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@191b8d86{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:39:46,122 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7884e2a5{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/java.io.tmpdir/jetty-localhost-36581-hadoop-hdfs-3_4_1-tests_jar-_-any-14382506091393186843/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T22:39:46,123 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f87fe6{HTTP/1.1, (http/1.1)}{localhost:36581} 2024-11-15T22:39:46,123 INFO [Time-limited test {}] server.Server(415): Started @244280ms 2024-11-15T22:39:46,133 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T22:39:46,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:46,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:46,423 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:39:46,425 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:39:46,426 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:39:46,426 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:39:46,426 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T22:39:46,426 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49e87d3f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:39:46,426 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5773e0ea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:39:46,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f61588{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/java.io.tmpdir/jetty-localhost-38143-hadoop-hdfs-3_4_1-tests_jar-_-any-5338777804203572494/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:39:46,530 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6a5db76d{HTTP/1.1, (http/1.1)}{localhost:38143} 2024-11-15T22:39:46,530 INFO [Time-limited test {}] server.Server(415): Started @244687ms 2024-11-15T22:39:46,531 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:39:46,555 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:39:46,557 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:39:46,558 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:39:46,558 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:39:46,558 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T22:39:46,559 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@665a776{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:39:46,559 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41b3b520{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:39:46,656 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2c49e976{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/java.io.tmpdir/jetty-localhost-35013-hadoop-hdfs-3_4_1-tests_jar-_-any-1206938688007438660/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:39:46,656 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@48b1d1cb{HTTP/1.1, (http/1.1)}{localhost:35013} 2024-11-15T22:39:46,656 INFO [Time-limited test {}] server.Server(415): Started @244814ms 2024-11-15T22:39:46,657 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:39:47,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:47,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:47,664 WARN [Thread-1966 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/cluster_cab337df-ff59-49d3-a639-80c31a24b544/data/data2/current/BP-87975701-172.17.0.3-1731710385574/current, will proceed with Du for space computation calculation, 2024-11-15T22:39:47,664 WARN [Thread-1965 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/cluster_cab337df-ff59-49d3-a639-80c31a24b544/data/data1/current/BP-87975701-172.17.0.3-1731710385574/current, will proceed with Du for space computation calculation, 2024-11-15T22:39:47,680 WARN [Thread-1929 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T22:39:47,682 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x645dd63731155924 with lease ID 0xb36a0bf8e96e764e: Processing first storage report for DS-821b5d7b-b09b-4199-95c2-6e936ce8db28 from datanode DatanodeRegistration(127.0.0.1:45387, datanodeUuid=267c508b-10f6-4bef-85dc-84943fe942bf, infoPort=37289, infoSecurePort=0, ipcPort=42471, storageInfo=lv=-57;cid=testClusterID;nsid=405224605;c=1731710385574) 2024-11-15T22:39:47,682 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x645dd63731155924 with lease ID 0xb36a0bf8e96e764e: from storage DS-821b5d7b-b09b-4199-95c2-6e936ce8db28 node DatanodeRegistration(127.0.0.1:45387, datanodeUuid=267c508b-10f6-4bef-85dc-84943fe942bf, infoPort=37289, infoSecurePort=0, ipcPort=42471, storageInfo=lv=-57;cid=testClusterID;nsid=405224605;c=1731710385574), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:39:47,682 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x645dd63731155924 with lease ID 0xb36a0bf8e96e764e: Processing first storage report for DS-3366cd78-19ff-4f8f-8990-309639b437f6 from datanode DatanodeRegistration(127.0.0.1:45387, datanodeUuid=267c508b-10f6-4bef-85dc-84943fe942bf, infoPort=37289, infoSecurePort=0, ipcPort=42471, storageInfo=lv=-57;cid=testClusterID;nsid=405224605;c=1731710385574) 2024-11-15T22:39:47,682 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x645dd63731155924 with lease ID 0xb36a0bf8e96e764e: from storage DS-3366cd78-19ff-4f8f-8990-309639b437f6 node DatanodeRegistration(127.0.0.1:45387, datanodeUuid=267c508b-10f6-4bef-85dc-84943fe942bf, infoPort=37289, infoSecurePort=0, ipcPort=42471, storageInfo=lv=-57;cid=testClusterID;nsid=405224605;c=1731710385574), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:39:47,800 WARN [Thread-1976 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/cluster_cab337df-ff59-49d3-a639-80c31a24b544/data/data3/current/BP-87975701-172.17.0.3-1731710385574/current, will proceed with Du for space computation calculation, 2024-11-15T22:39:47,800 WARN [Thread-1977 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/cluster_cab337df-ff59-49d3-a639-80c31a24b544/data/data4/current/BP-87975701-172.17.0.3-1731710385574/current, will proceed with Du for space computation calculation, 2024-11-15T22:39:47,817 WARN [Thread-1952 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T22:39:47,819 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x93b8941589567f1b with lease ID 0xb36a0bf8e96e764f: Processing first storage report for DS-8ec989fa-6131-4189-87e7-9558d7a5dacf from datanode DatanodeRegistration(127.0.0.1:33291, datanodeUuid=af750f05-61e3-4426-8a6e-d9e39f6a1730, infoPort=37107, infoSecurePort=0, ipcPort=40993, storageInfo=lv=-57;cid=testClusterID;nsid=405224605;c=1731710385574) 2024-11-15T22:39:47,819 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x93b8941589567f1b with lease ID 0xb36a0bf8e96e764f: from storage DS-8ec989fa-6131-4189-87e7-9558d7a5dacf node DatanodeRegistration(127.0.0.1:33291, datanodeUuid=af750f05-61e3-4426-8a6e-d9e39f6a1730, infoPort=37107, infoSecurePort=0, ipcPort=40993, storageInfo=lv=-57;cid=testClusterID;nsid=405224605;c=1731710385574), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:39:47,819 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x93b8941589567f1b with lease ID 0xb36a0bf8e96e764f: Processing first storage report for DS-8d497264-3d3f-4565-b0f7-abd695c71700 from datanode DatanodeRegistration(127.0.0.1:33291, datanodeUuid=af750f05-61e3-4426-8a6e-d9e39f6a1730, infoPort=37107, infoSecurePort=0, ipcPort=40993, storageInfo=lv=-57;cid=testClusterID;nsid=405224605;c=1731710385574) 2024-11-15T22:39:47,819 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x93b8941589567f1b with lease ID 0xb36a0bf8e96e764f: from storage DS-8d497264-3d3f-4565-b0f7-abd695c71700 node DatanodeRegistration(127.0.0.1:33291, datanodeUuid=af750f05-61e3-4426-8a6e-d9e39f6a1730, infoPort=37107, infoSecurePort=0, ipcPort=40993, storageInfo=lv=-57;cid=testClusterID;nsid=405224605;c=1731710385574), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:39:47,889 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043 2024-11-15T22:39:47,896 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/cluster_cab337df-ff59-49d3-a639-80c31a24b544/zookeeper_0, clientPort=51416, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/cluster_cab337df-ff59-49d3-a639-80c31a24b544/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/cluster_cab337df-ff59-49d3-a639-80c31a24b544/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T22:39:47,897 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51416 2024-11-15T22:39:47,897 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:39:47,898 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:39:47,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741825_1001 (size=7) 2024-11-15T22:39:47,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741825_1001 (size=7) 2024-11-15T22:39:47,909 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789 with version=8 2024-11-15T22:39:47,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/hbase-staging 2024-11-15T22:39:47,911 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e611192d6313:0 server-side Connection retries=45 2024-11-15T22:39:47,911 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:39:47,911 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T22:39:47,911 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T22:39:47,911 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:39:47,911 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T22:39:47,911 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T22:39:47,912 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T22:39:47,912 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45633 2024-11-15T22:39:47,914 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45633 connecting to ZooKeeper ensemble=127.0.0.1:51416 2024-11-15T22:39:47,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:456330x0, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-15T22:39:47,957 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45633-0x10140a782610000 connected 2024-11-15T22:39:48,063 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:39:48,068 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:39:48,074 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:39:48,075 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789, hbase.cluster.distributed=false 2024-11-15T22:39:48,077 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T22:39:48,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45633 2024-11-15T22:39:48,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45633 2024-11-15T22:39:48,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45633 2024-11-15T22:39:48,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45633 2024-11-15T22:39:48,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45633 2024-11-15T22:39:48,091 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e611192d6313:0 server-side Connection retries=45 2024-11-15T22:39:48,091 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:39:48,091 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T22:39:48,091 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T22:39:48,091 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:39:48,091 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T22:39:48,091 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T22:39:48,091 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T22:39:48,092 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42145 2024-11-15T22:39:48,093 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42145 connecting to ZooKeeper ensemble=127.0.0.1:51416 2024-11-15T22:39:48,094 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:39:48,095 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:39:48,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:421450x0, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T22:39:48,110 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42145-0x10140a782610001 connected 2024-11-15T22:39:48,110 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:39:48,111 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T22:39:48,111 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T22:39:48,112 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T22:39:48,113 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T22:39:48,114 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42145 2024-11-15T22:39:48,114 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42145 2024-11-15T22:39:48,115 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42145 2024-11-15T22:39:48,116 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42145 2024-11-15T22:39:48,116 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42145 2024-11-15T22:39:48,130 DEBUG [M:0;e611192d6313:45633 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e611192d6313:45633 2024-11-15T22:39:48,130 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e611192d6313,45633,1731710387911 2024-11-15T22:39:48,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:39:48,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:39:48,142 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e611192d6313,45633,1731710387911 2024-11-15T22:39:48,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T22:39:48,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:39:48,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:39:48,152 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T22:39:48,153 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e611192d6313,45633,1731710387911 from backup master directory 2024-11-15T22:39:48,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:39:48,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e611192d6313,45633,1731710387911 2024-11-15T22:39:48,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:39:48,162 WARN [master/e611192d6313:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
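The "Received ZooKeeper Event, type=..., state=..., path=..." and "Set watcher on znode that does not yet exist" entries above come from HBase's ZKWatcher/ZKUtil wrappers around the plain ZooKeeper client. Below is a minimal sketch of that underlying pattern, assuming the stock org.apache.zookeeper client API rather than the HBase wrappers; the class name EventLoggingWatcher is hypothetical, and the quorum address and znode path are taken from the entries above.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class EventLoggingWatcher implements Watcher {
  @Override
  public void process(WatchedEvent event) {
    // Same fields the ZKWatcher lines above print: event type, session state, znode path.
    System.out.printf("Received ZooKeeper Event, type=%s, state=%s, path=%s%n",
        event.getType(), event.getState(), event.getPath());
  }

  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51416", 30000, new EventLoggingWatcher());
    // exists() registers a watch even when the znode is absent, which is the pattern behind
    // "Set watcher on znode that does not yet exist, /hbase/running": the watcher later fires
    // with a NodeCreated event once the active master creates that znode.
    zk.exists("/hbase/running", true);
  }
}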
2024-11-15T22:39:48,162 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e611192d6313,45633,1731710387911 2024-11-15T22:39:48,166 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/hbase.id] with ID: 530f5074-562d-4aed-a87c-2f8babf4e784 2024-11-15T22:39:48,166 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/.tmp/hbase.id 2024-11-15T22:39:48,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741826_1002 (size=42) 2024-11-15T22:39:48,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741826_1002 (size=42) 2024-11-15T22:39:48,172 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/.tmp/hbase.id]:[hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/hbase.id] 2024-11-15T22:39:48,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:48,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:48,183 INFO [master/e611192d6313:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:39:48,183 INFO [master/e611192d6313:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T22:39:48,184 INFO [master/e611192d6313:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
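The repeated "Failed invocation ... java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" warnings above appear to be fallout from an earlier mini-cluster's WAL close racing with DFS shutdown: RecoverLeaseFSUtils reaches DistributedFileSystem.isFileClosed() through reflection, so when the already-closed DFSClient throws, the IOException surfaces only as the cause of an InvocationTargetException. Below is a minimal sketch of that reflective pattern, assuming hadoop-common on the classpath; the helper name probeIsFileClosed is hypothetical and this is not the HBase source.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  /** Returns true only if the filesystem positively reports the file as closed. */
  static boolean probeIsFileClosed(FileSystem fs, Path path) {
    try {
      // Looked up reflectively so the code still loads against Hadoop versions
      // whose FileSystem implementation lacks isFileClosed(Path).
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // method unavailable or inaccessible on this Hadoop version
    } catch (InvocationTargetException e) {
      // The real error ("Filesystem closed") is the cause, which is why the log
      // prints "InvocationTargetException: null" before the "Caused by" line.
      System.err.println("Failed invocation for " + path + ": " + e.getCause());
      return false;
    }
  }
}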
2024-11-15T22:39:48,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:39:48,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:39:48,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741827_1003 (size=196) 2024-11-15T22:39:48,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741827_1003 (size=196) 2024-11-15T22:39:48,200 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T22:39:48,200 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T22:39:48,201 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:39:48,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741828_1004 (size=1189) 2024-11-15T22:39:48,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741828_1004 (size=1189) 2024-11-15T22:39:48,207 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store 2024-11-15T22:39:48,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741829_1005 (size=34) 2024-11-15T22:39:48,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741829_1005 (size=34) 2024-11-15T22:39:48,215 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:39:48,215 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T22:39:48,215 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:39:48,215 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:39:48,215 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T22:39:48,215 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:39:48,215 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
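The master:store descriptor printed above (families info, proc, rs, and state, each with its own VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY, and BLOCKSIZE settings) can be read as a set of column-family builder calls. Below is a minimal sketch using the public HBase client descriptor builders, included for illustration only as an assumption; MasterRegion assembles this descriptor internally. Only the 'info' family is spelled out; the other three follow the same pattern with the values shown above.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

final class MasterStoreDescriptorSketch {
  static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        // 'info': VERSIONS => '3', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1',
        // BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)'
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build())
        // 'proc', 'rs', 'state': VERSIONS => '1', BLOOMFILTER => 'ROW',
        // IN_MEMORY => 'false', BLOCKSIZE => '65536 B (64KB)'; built the same way.
        .build();
  }
}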
2024-11-15T22:39:48,215 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731710388215Disabling compacts and flushes for region at 1731710388215Disabling writes for close at 1731710388215Writing region close event to WAL at 1731710388215Closed at 1731710388215 2024-11-15T22:39:48,216 WARN [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/.initializing 2024-11-15T22:39:48,216 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/WALs/e611192d6313,45633,1731710387911 2024-11-15T22:39:48,219 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C45633%2C1731710387911, suffix=, logDir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/WALs/e611192d6313,45633,1731710387911, archiveDir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/oldWALs, maxLogs=10 2024-11-15T22:39:48,219 INFO [master/e611192d6313:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C45633%2C1731710387911.1731710388219 2024-11-15T22:39:48,225 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/WALs/e611192d6313,45633,1731710387911/e611192d6313%2C45633%2C1731710387911.1731710388219 2024-11-15T22:39:48,225 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37107:37107),(127.0.0.1/127.0.0.1:37289:37289)] 2024-11-15T22:39:48,226 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:39:48,226 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:39:48,226 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:39:48,226 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:39:48,227 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:39:48,228 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T22:39:48,228 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:39:48,229 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:39:48,229 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:39:48,230 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T22:39:48,230 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:39:48,230 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:39:48,230 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:39:48,231 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T22:39:48,231 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:39:48,231 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:39:48,231 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:39:48,232 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T22:39:48,232 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:39:48,233 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:39:48,233 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:39:48,233 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:39:48,233 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:39:48,234 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:39:48,234 DEBUG [master/e611192d6313:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:39:48,235 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T22:39:48,236 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:39:48,238 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:39:48,238 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=803400, jitterRate=0.021576404571533203}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T22:39:48,239 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731710388226Initializing all the Stores at 1731710388227 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710388227Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710388227Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710388227Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710388227Cleaning up temporary data from old regions at 1731710388234 (+7 ms)Region opened successfully at 1731710388239 (+5 ms) 2024-11-15T22:39:48,239 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T22:39:48,242 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c3d9d64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e611192d6313/172.17.0.3:0 2024-11-15T22:39:48,242 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T22:39:48,243 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T22:39:48,243 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T22:39:48,243 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T22:39:48,243 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T22:39:48,243 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T22:39:48,243 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T22:39:48,245 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T22:39:48,246 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T22:39:48,257 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T22:39:48,257 INFO [master/e611192d6313:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T22:39:48,258 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T22:39:48,267 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T22:39:48,268 INFO [master/e611192d6313:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T22:39:48,269 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T22:39:48,278 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T22:39:48,279 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T22:39:48,288 DEBUG 
[master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T22:39:48,290 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T22:39:48,299 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T22:39:48,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T22:39:48,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T22:39:48,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:39:48,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:39:48,310 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e611192d6313,45633,1731710387911, sessionid=0x10140a782610000, setting cluster-up flag (Was=false) 2024-11-15T22:39:48,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:39:48,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:39:48,362 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T22:39:48,364 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e611192d6313,45633,1731710387911 2024-11-15T22:39:48,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:39:48,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:39:48,402 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T22:39:48,402 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T22:39:48,402 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T22:39:48,415 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T22:39:48,416 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e611192d6313,45633,1731710387911 2024-11-15T22:39:48,418 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T22:39:48,420 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T22:39:48,420 INFO [master/e611192d6313:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T22:39:48,420 INFO [master/e611192d6313:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-15T22:39:48,420 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e611192d6313,45633,1731710387911 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T22:39:48,422 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:39:48,422 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:39:48,422 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:39:48,422 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:39:48,423 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e611192d6313:0, corePoolSize=10, maxPoolSize=10 2024-11-15T22:39:48,423 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:39:48,423 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e611192d6313:0, corePoolSize=2, maxPoolSize=2 2024-11-15T22:39:48,423 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:39:48,424 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731710418424 2024-11-15T22:39:48,424 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T22:39:48,424 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T22:39:48,425 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T22:39:48,425 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T22:39:48,425 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T22:39:48,425 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T22:39:48,425 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:48,426 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T22:39:48,426 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T22:39:48,426 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:39:48,426 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T22:39:48,426 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T22:39:48,426 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T22:39:48,426 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T22:39:48,427 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710388426,5,FailOnTimeoutGroup] 2024-11-15T22:39:48,427 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710388427,5,FailOnTimeoutGroup] 2024-11-15T22:39:48,427 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:48,427 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T22:39:48,427 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:48,427 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-15T22:39:48,428 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:39:48,428 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T22:39:48,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741831_1007 (size=1321) 2024-11-15T22:39:48,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741831_1007 (size=1321) 2024-11-15T22:39:48,434 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T22:39:48,434 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789 2024-11-15T22:39:48,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741832_1008 (size=32) 2024-11-15T22:39:48,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741832_1008 (size=32) 2024-11-15T22:39:48,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:39:48,441 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T22:39:48,442 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T22:39:48,442 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:39:48,443 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:39:48,443 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T22:39:48,444 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T22:39:48,444 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:39:48,444 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:39:48,444 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T22:39:48,445 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T22:39:48,445 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:39:48,445 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:39:48,445 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T22:39:48,446 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T22:39:48,446 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:39:48,446 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:39:48,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T22:39:48,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740 2024-11-15T22:39:48,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740 2024-11-15T22:39:48,448 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T22:39:48,448 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T22:39:48,449 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T22:39:48,450 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T22:39:48,451 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:39:48,452 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=761461, jitterRate=-0.03175252676010132}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T22:39:48,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731710388440Initializing all the Stores at 1731710388441 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710388441Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710388441Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710388441Instantiating store for column family {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710388441Cleaning up temporary data from old regions at 1731710388448 (+7 ms)Region opened successfully at 1731710388452 (+4 ms) 2024-11-15T22:39:48,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T22:39:48,452 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T22:39:48,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T22:39:48,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T22:39:48,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T22:39:48,453 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T22:39:48,453 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731710388452Disabling compacts and flushes for region at 1731710388452Disabling writes for close at 1731710388452Writing region close event to WAL at 1731710388453 (+1 ms)Closed at 1731710388453 2024-11-15T22:39:48,454 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:39:48,454 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T22:39:48,454 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T22:39:48,455 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T22:39:48,456 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T22:39:48,518 INFO [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(746): ClusterId : 530f5074-562d-4aed-a87c-2f8babf4e784 2024-11-15T22:39:48,518 DEBUG [RS:0;e611192d6313:42145 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T22:39:48,532 DEBUG [RS:0;e611192d6313:42145 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T22:39:48,533 DEBUG [RS:0;e611192d6313:42145 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T22:39:48,543 DEBUG [RS:0;e611192d6313:42145 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T22:39:48,544 DEBUG [RS:0;e611192d6313:42145 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a02d0e, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e611192d6313/172.17.0.3:0 2024-11-15T22:39:48,558 DEBUG [RS:0;e611192d6313:42145 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e611192d6313:42145 2024-11-15T22:39:48,558 INFO [RS:0;e611192d6313:42145 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T22:39:48,559 INFO [RS:0;e611192d6313:42145 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T22:39:48,559 DEBUG [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-15T22:39:48,559 INFO [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(2659): reportForDuty to master=e611192d6313,45633,1731710387911 with port=42145, startcode=1731710388091 2024-11-15T22:39:48,559 DEBUG [RS:0;e611192d6313:42145 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T22:39:48,561 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60951, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T22:39:48,561 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45633 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e611192d6313,42145,1731710388091 2024-11-15T22:39:48,562 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45633 {}] master.ServerManager(517): Registering regionserver=e611192d6313,42145,1731710388091 2024-11-15T22:39:48,563 DEBUG [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789 2024-11-15T22:39:48,563 DEBUG [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40299 2024-11-15T22:39:48,563 DEBUG [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T22:39:48,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T22:39:48,573 DEBUG [RS:0;e611192d6313:42145 {}] zookeeper.ZKUtil(111): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e611192d6313,42145,1731710388091 2024-11-15T22:39:48,573 WARN [RS:0;e611192d6313:42145 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-15T22:39:48,573 INFO [RS:0;e611192d6313:42145 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:39:48,573 DEBUG [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/WALs/e611192d6313,42145,1731710388091 2024-11-15T22:39:48,573 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e611192d6313,42145,1731710388091] 2024-11-15T22:39:48,576 INFO [RS:0;e611192d6313:42145 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T22:39:48,577 INFO [RS:0;e611192d6313:42145 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T22:39:48,578 INFO [RS:0;e611192d6313:42145 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T22:39:48,578 INFO [RS:0;e611192d6313:42145 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:48,578 INFO [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T22:39:48,579 INFO [RS:0;e611192d6313:42145 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T22:39:48,579 INFO [RS:0;e611192d6313:42145 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:48,579 DEBUG [RS:0;e611192d6313:42145 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:39:48,579 DEBUG [RS:0;e611192d6313:42145 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:39:48,579 DEBUG [RS:0;e611192d6313:42145 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:39:48,579 DEBUG [RS:0;e611192d6313:42145 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:39:48,579 DEBUG [RS:0;e611192d6313:42145 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:39:48,579 DEBUG [RS:0;e611192d6313:42145 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e611192d6313:0, corePoolSize=2, maxPoolSize=2 2024-11-15T22:39:48,579 DEBUG [RS:0;e611192d6313:42145 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:39:48,579 DEBUG [RS:0;e611192d6313:42145 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:39:48,579 DEBUG [RS:0;e611192d6313:42145 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e611192d6313:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T22:39:48,579 DEBUG [RS:0;e611192d6313:42145 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:39:48,579 DEBUG [RS:0;e611192d6313:42145 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:39:48,579 DEBUG [RS:0;e611192d6313:42145 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:39:48,579 DEBUG [RS:0;e611192d6313:42145 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e611192d6313:0, corePoolSize=3, maxPoolSize=3 2024-11-15T22:39:48,579 DEBUG [RS:0;e611192d6313:42145 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0, corePoolSize=3, maxPoolSize=3 2024-11-15T22:39:48,580 INFO [RS:0;e611192d6313:42145 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:48,580 INFO [RS:0;e611192d6313:42145 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:48,580 INFO [RS:0;e611192d6313:42145 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:48,580 INFO [RS:0;e611192d6313:42145 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:48,580 INFO [RS:0;e611192d6313:42145 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:48,580 INFO [RS:0;e611192d6313:42145 {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,42145,1731710388091-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T22:39:48,593 INFO [RS:0;e611192d6313:42145 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T22:39:48,593 INFO [RS:0;e611192d6313:42145 {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,42145,1731710388091-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:48,594 INFO [RS:0;e611192d6313:42145 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:48,594 INFO [RS:0;e611192d6313:42145 {}] regionserver.Replication(171): e611192d6313,42145,1731710388091 started 2024-11-15T22:39:48,606 WARN [e611192d6313:45633 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-15T22:39:48,606 INFO [RS:0;e611192d6313:42145 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T22:39:48,606 INFO [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(1482): Serving as e611192d6313,42145,1731710388091, RpcServer on e611192d6313/172.17.0.3:42145, sessionid=0x10140a782610001 2024-11-15T22:39:48,606 DEBUG [RS:0;e611192d6313:42145 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T22:39:48,606 DEBUG [RS:0;e611192d6313:42145 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e611192d6313,42145,1731710388091 2024-11-15T22:39:48,607 DEBUG [RS:0;e611192d6313:42145 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e611192d6313,42145,1731710388091' 2024-11-15T22:39:48,607 DEBUG [RS:0;e611192d6313:42145 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T22:39:48,607 DEBUG [RS:0;e611192d6313:42145 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T22:39:48,607 DEBUG [RS:0;e611192d6313:42145 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T22:39:48,607 DEBUG [RS:0;e611192d6313:42145 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T22:39:48,607 DEBUG [RS:0;e611192d6313:42145 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e611192d6313,42145,1731710388091 2024-11-15T22:39:48,607 DEBUG [RS:0;e611192d6313:42145 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e611192d6313,42145,1731710388091' 2024-11-15T22:39:48,607 DEBUG [RS:0;e611192d6313:42145 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T22:39:48,608 DEBUG [RS:0;e611192d6313:42145 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T22:39:48,608 DEBUG [RS:0;e611192d6313:42145 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T22:39:48,608 INFO [RS:0;e611192d6313:42145 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T22:39:48,608 INFO [RS:0;e611192d6313:42145 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-15T22:39:48,712 INFO [RS:0;e611192d6313:42145 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C42145%2C1731710388091, suffix=, logDir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/WALs/e611192d6313,42145,1731710388091, archiveDir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/oldWALs, maxLogs=32 2024-11-15T22:39:48,713 INFO [RS:0;e611192d6313:42145 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C42145%2C1731710388091.1731710388713 2024-11-15T22:39:48,722 INFO [RS:0;e611192d6313:42145 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/WALs/e611192d6313,42145,1731710388091/e611192d6313%2C42145%2C1731710388091.1731710388713 2024-11-15T22:39:48,724 DEBUG [RS:0;e611192d6313:42145 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37289:37289),(127.0.0.1/127.0.0.1:37107:37107)] 2024-11-15T22:39:48,856 DEBUG [e611192d6313:45633 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T22:39:48,858 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e611192d6313,42145,1731710388091 2024-11-15T22:39:48,861 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e611192d6313,42145,1731710388091, state=OPENING 2024-11-15T22:39:48,873 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T22:39:48,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:39:48,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:39:48,886 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T22:39:48,886 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:39:48,886 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:39:48,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e611192d6313,42145,1731710388091}] 2024-11-15T22:39:49,042 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T22:39:49,045 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50867, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T22:39:49,051 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T22:39:49,051 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:39:49,054 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C42145%2C1731710388091.meta, suffix=.meta, logDir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/WALs/e611192d6313,42145,1731710388091, archiveDir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/oldWALs, maxLogs=32 2024-11-15T22:39:49,054 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C42145%2C1731710388091.meta.1731710389054.meta 2024-11-15T22:39:49,060 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/WALs/e611192d6313,42145,1731710388091/e611192d6313%2C42145%2C1731710388091.meta.1731710389054.meta 2024-11-15T22:39:49,061 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37289:37289),(127.0.0.1/127.0.0.1:37107:37107)] 2024-11-15T22:39:49,062 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:39:49,062 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T22:39:49,062 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T22:39:49,062 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-15T22:39:49,062 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T22:39:49,062 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:39:49,062 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T22:39:49,062 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T22:39:49,063 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T22:39:49,064 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T22:39:49,064 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:39:49,065 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:39:49,065 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T22:39:49,066 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T22:39:49,066 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:39:49,066 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:39:49,066 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T22:39:49,067 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T22:39:49,067 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:39:49,067 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:39:49,067 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T22:39:49,068 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T22:39:49,068 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:39:49,068 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-15T22:39:49,068 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T22:39:49,069 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740 2024-11-15T22:39:49,070 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740 2024-11-15T22:39:49,071 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T22:39:49,071 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T22:39:49,072 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T22:39:49,073 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T22:39:49,074 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=700657, jitterRate=-0.10906876623630524}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T22:39:49,074 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T22:39:49,074 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731710389062Writing region info on filesystem at 1731710389062Initializing all the Stores at 1731710389063 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710389063Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710389063Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710389063Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710389063Cleaning up temporary data from old regions at 1731710389071 (+8 ms)Running coprocessor post-open hooks at 1731710389074 (+3 ms)Region opened successfully at 1731710389074 2024-11-15T22:39:49,075 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731710389042 2024-11-15T22:39:49,077 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T22:39:49,077 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T22:39:49,078 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e611192d6313,42145,1731710388091 2024-11-15T22:39:49,079 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e611192d6313,42145,1731710388091, state=OPEN 2024-11-15T22:39:49,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T22:39:49,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T22:39:49,116 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e611192d6313,42145,1731710388091 2024-11-15T22:39:49,116 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:39:49,116 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:39:49,120 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T22:39:49,120 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e611192d6313,42145,1731710388091 in 230 msec 2024-11-15T22:39:49,124 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T22:39:49,124 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 666 msec 2024-11-15T22:39:49,125 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:39:49,125 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T22:39:49,127 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T22:39:49,127 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e611192d6313,42145,1731710388091, seqNum=-1] 2024-11-15T22:39:49,127 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T22:39:49,129 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54921, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T22:39:49,136 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 716 msec 2024-11-15T22:39:49,136 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731710389136, completionTime=-1 2024-11-15T22:39:49,136 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T22:39:49,137 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T22:39:49,139 INFO [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T22:39:49,139 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731710449139 2024-11-15T22:39:49,139 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731710509139 2024-11-15T22:39:49,139 INFO [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-15T22:39:49,139 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,45633,1731710387911-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:49,139 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,45633,1731710387911-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:49,139 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,45633,1731710387911-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:49,139 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e611192d6313:45633, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T22:39:49,139 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:49,139 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:49,141 DEBUG [master/e611192d6313:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T22:39:49,143 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.981sec 2024-11-15T22:39:49,143 INFO [master/e611192d6313:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T22:39:49,143 INFO [master/e611192d6313:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T22:39:49,143 INFO [master/e611192d6313:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T22:39:49,143 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T22:39:49,143 INFO [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T22:39:49,143 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,45633,1731710387911-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T22:39:49,143 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,45633,1731710387911-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T22:39:49,146 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T22:39:49,146 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T22:39:49,146 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,45633,1731710387911-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:39:49,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:49,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:49,219 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5958c6d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:39:49,219 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e611192d6313,45633,-1 for getting cluster id 2024-11-15T22:39:49,220 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T22:39:49,223 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '530f5074-562d-4aed-a87c-2f8babf4e784' 2024-11-15T22:39:49,224 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T22:39:49,224 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "530f5074-562d-4aed-a87c-2f8babf4e784" 2024-11-15T22:39:49,224 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@370771f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:39:49,224 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e611192d6313,45633,-1] 2024-11-15T22:39:49,225 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T22:39:49,225 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:39:49,227 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43192, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T22:39:49,228 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5164b8d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:39:49,228 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T22:39:49,230 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e611192d6313,42145,1731710388091, seqNum=-1] 2024-11-15T22:39:49,231 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T22:39:49,232 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40746, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T22:39:49,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e611192d6313,45633,1731710387911 2024-11-15T22:39:49,234 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:39:49,237 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T22:39:49,237 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T22:39:49,238 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is e611192d6313,45633,1731710387911 2024-11-15T22:39:49,238 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@47ba5de5 2024-11-15T22:39:49,238 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T22:39:49,239 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43200, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T22:39:49,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45633 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T22:39:49,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45633 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
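The two TableDescriptorChecker warnings above are expected here: the region max file size (786432 bytes) and memstore flush size (8192 bytes) are far below production defaults, presumably set deliberately so this log-rolling test flushes and splits quickly. As a minimal sketch only (not the actual test code; whether the values are set on the cluster Configuration or on the table descriptor is an assumption), the sizes quoted in the warnings could come from overrides like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallRegionSizesSketch {
    // Property names are the ones quoted in the warnings above; the values
    // 786432 and 8192 are the ones the checker reports. Setting them this low
    // forces very frequent memstore flushes and aggressive region splits.
    public static Configuration withTinySizes() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB per region before split
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB memstore before flush
        return conf;
    }
}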
2024-11-15T22:39:49,240 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45633 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T22:39:49,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45633 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-15T22:39:49,243 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T22:39:49,243 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:39:49,243 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45633 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-15T22:39:49,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45633 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T22:39:49,244 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T22:39:49,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741835_1011 (size=381) 2024-11-15T22:39:49,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741835_1011 (size=381) 2024-11-15T22:39:49,252 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0823cc1fab0ce53b4677252ec2fad0f8, NAME => 'TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789 2024-11-15T22:39:49,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741836_1012 (size=64) 2024-11-15T22:39:49,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741836_1012 (size=64) 2024-11-15T22:39:49,258 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:39:49,258 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 0823cc1fab0ce53b4677252ec2fad0f8, disabling compactions & flushes 2024-11-15T22:39:49,258 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 2024-11-15T22:39:49,258 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 2024-11-15T22:39:49,258 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. after waiting 0 ms 2024-11-15T22:39:49,258 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 2024-11-15T22:39:49,258 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 2024-11-15T22:39:49,258 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0823cc1fab0ce53b4677252ec2fad0f8: Waiting for close lock at 1731710389258Disabling compacts and flushes for region at 1731710389258Disabling writes for close at 1731710389258Writing region close event to WAL at 1731710389258Closed at 1731710389258 2024-11-15T22:39:49,259 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T22:39:49,260 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731710389259"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731710389259"}]},"ts":"1731710389259"} 2024-11-15T22:39:49,262 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
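For reference, a minimal client-side sketch (illustrative, not the test's actual code) of the kind of createTable call that would produce the create request and CreateTableProcedure logged above: a single 'info' column family, all other attributes left at their defaults.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTestTableSketch {
    public static void main(String[] args) throws Exception {
        // Connects using whatever hbase-site.xml is on the classpath (here that
        // would be the minicluster's configuration) and creates the table seen
        // in the log with one 'info' column family.
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
            admin.createTable(
                TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                    .build());
        }
    }
}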
2024-11-15T22:39:49,262 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T22:39:49,263 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731710389263"}]},"ts":"1731710389263"} 2024-11-15T22:39:49,265 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-15T22:39:49,265 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0823cc1fab0ce53b4677252ec2fad0f8, ASSIGN}] 2024-11-15T22:39:49,266 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0823cc1fab0ce53b4677252ec2fad0f8, ASSIGN 2024-11-15T22:39:49,267 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0823cc1fab0ce53b4677252ec2fad0f8, ASSIGN; state=OFFLINE, location=e611192d6313,42145,1731710388091; forceNewPlan=false, retain=false 2024-11-15T22:39:49,419 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0823cc1fab0ce53b4677252ec2fad0f8, regionState=OPENING, regionLocation=e611192d6313,42145,1731710388091 2024-11-15T22:39:49,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0823cc1fab0ce53b4677252ec2fad0f8, ASSIGN because future has completed 2024-11-15T22:39:49,425 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0823cc1fab0ce53b4677252ec2fad0f8, server=e611192d6313,42145,1731710388091}] 2024-11-15T22:39:49,556 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,556 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,556 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,556 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,557 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,557 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,557 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,557 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:49,581 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 
2024-11-15T22:39:49,582 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0823cc1fab0ce53b4677252ec2fad0f8, NAME => 'TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:39:49,582 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:39:49,582 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:39:49,582 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:39:49,582 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:39:49,583 INFO [StoreOpener-0823cc1fab0ce53b4677252ec2fad0f8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:39:49,584 INFO [StoreOpener-0823cc1fab0ce53b4677252ec2fad0f8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0823cc1fab0ce53b4677252ec2fad0f8 columnFamilyName info 2024-11-15T22:39:49,584 DEBUG [StoreOpener-0823cc1fab0ce53b4677252ec2fad0f8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:39:49,585 INFO [StoreOpener-0823cc1fab0ce53b4677252ec2fad0f8-1 {}] regionserver.HStore(327): Store=0823cc1fab0ce53b4677252ec2fad0f8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:39:49,585 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:39:49,586 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:39:49,586 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:39:49,586 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:39:49,586 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:39:49,588 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:39:49,590 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:39:49,590 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0823cc1fab0ce53b4677252ec2fad0f8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=860103, jitterRate=0.0936775654554367}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T22:39:49,590 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:39:49,591 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0823cc1fab0ce53b4677252ec2fad0f8: Running coprocessor pre-open hook at 1731710389582Writing region info on filesystem at 1731710389582Initializing all the Stores at 1731710389583 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710389583Cleaning up temporary data from old regions at 1731710389586 (+3 ms)Running coprocessor post-open hooks at 1731710389590 (+4 ms)Region opened successfully at 1731710389591 (+1 ms) 2024-11-15T22:39:49,592 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8., pid=6, masterSystemTime=1731710389578 2024-11-15T22:39:49,594 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 
2024-11-15T22:39:49,594 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 2024-11-15T22:39:49,594 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0823cc1fab0ce53b4677252ec2fad0f8, regionState=OPEN, openSeqNum=2, regionLocation=e611192d6313,42145,1731710388091 2024-11-15T22:39:49,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0823cc1fab0ce53b4677252ec2fad0f8, server=e611192d6313,42145,1731710388091 because future has completed 2024-11-15T22:39:49,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T22:39:49,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0823cc1fab0ce53b4677252ec2fad0f8, server=e611192d6313,42145,1731710388091 in 172 msec 2024-11-15T22:39:49,601 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T22:39:49,601 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0823cc1fab0ce53b4677252ec2fad0f8, ASSIGN in 334 msec 2024-11-15T22:39:49,602 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T22:39:49,602 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731710389602"}]},"ts":"1731710389602"} 2024-11-15T22:39:49,604 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-15T22:39:49,605 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T22:39:49,607 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 365 msec 2024-11-15T22:39:50,087 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T22:39:50,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,090 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,091 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,091 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,092 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,092 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,093 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,093 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,113 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,113 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,113 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,120 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:50,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:50,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:51,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:51,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:52,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:52,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:53,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:53,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:54,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:54,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:54,576 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T22:39:54,578 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-15T22:39:55,088 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T22:39:55,091 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,092 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,093 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,093 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,094 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,094 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,097 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,098 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,120 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,120 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,120 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,121 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,121 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,121 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:39:55,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:55,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:56,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:56,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:57,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:57,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:58,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:58,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:39:58,402 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T22:39:58,402 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T22:39:58,404 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T22:39:58,404 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-15T22:39:58,404 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-15T22:39:58,404 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-15T22:39:59,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:59,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:39:59,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45633 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T22:39:59,349 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-15T22:39:59,349 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-15T22:39:59,351 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-15T22:39:59,351 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 
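The repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" entries above come from the Close-WAL-Writer thread polling isFileClosed roughly once per second (see the one-second spacing of the timestamps) against an HDFS client that has already been closed; the traces show the call goes through java.lang.reflect.Method.invoke, which is why the real IOException only appears as the "Caused by". The sketch below illustrates that wrapping in plain Java; the class and method here are invented stand-ins, not the RecoverLeaseFSUtils source.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Minimal sketch, not the RecoverLeaseFSUtils source: shows why a closed client
// surfaces as InvocationTargetException with the real IOException as the cause,
// matching the "Failed invocation ... Caused by: Filesystem closed" traces above.
public final class IsFileClosedProbe {

  // Hypothetical stand-in for a filesystem whose underlying client is closed.
  public static final class ClosedFs {
    public boolean isFileClosed(String path) throws IOException {
      throw new IOException("Filesystem closed"); // the message DFSClient.checkOpen reports
    }
  }

  // Reflective probe; returns false on failure so the caller can retry, roughly
  // like the once-per-second retries visible in the timestamps above.
  static boolean probe(Object fs, String path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", String.class);
      return (Boolean) m.invoke(fs, path);
    } catch (InvocationTargetException e) {
      // The real failure only shows up as the wrapped cause.
      System.out.println("Failed invocation for " + path + ": " + e.getCause());
      return false;
    } catch (ReflectiveOperationException e) {
      return false; // isFileClosed not available on this filesystem type
    }
  }

  public static void main(String[] args) {
    // Hypothetical path; stands in for the WAL files named in the log above.
    probe(new ClosedFs(), "/example/wal-under-recovery");
  }
}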
2024-11-15T22:39:59,354 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8., hostname=e611192d6313,42145,1731710388091, seqNum=2] 2024-11-15T22:39:59,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:39:59,368 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0823cc1fab0ce53b4677252ec2fad0f8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T22:39:59,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/31132d2348a54555893d106db1444ba8 is 1080, key is row0001/info:/1731710399355/Put/seqid=0 2024-11-15T22:39:59,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741837_1013 (size=12509) 2024-11-15T22:39:59,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741837_1013 (size=12509) 2024-11-15T22:39:59,397 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/31132d2348a54555893d106db1444ba8 2024-11-15T22:39:59,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/31132d2348a54555893d106db1444ba8 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/31132d2348a54555893d106db1444ba8 2024-11-15T22:39:59,410 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/31132d2348a54555893d106db1444ba8, entries=7, sequenceid=11, filesize=12.2 K 2024-11-15T22:39:59,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=21.02 KB/21520 for 0823cc1fab0ce53b4677252ec2fad0f8 in 43ms, sequenceid=11, compaction requested=false 2024-11-15T22:39:59,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0823cc1fab0ce53b4677252ec2fad0f8: 2024-11-15T22:39:59,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:39:59,413 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0823cc1fab0ce53b4677252ec2fad0f8 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-11-15T22:39:59,417 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/1aae2db1cfed47bc9016e884c626b5bd is 1080, key is row0008/info:/1731710399369/Put/seqid=0 2024-11-15T22:39:59,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741838_1014 (size=28684) 2024-11-15T22:39:59,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741838_1014 (size=28684) 2024-11-15T22:39:59,421 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=36 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/1aae2db1cfed47bc9016e884c626b5bd 2024-11-15T22:39:59,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/1aae2db1cfed47bc9016e884c626b5bd as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/1aae2db1cfed47bc9016e884c626b5bd 2024-11-15T22:39:59,432 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/1aae2db1cfed47bc9016e884c626b5bd, entries=22, sequenceid=36, filesize=28.0 K 2024-11-15T22:39:59,433 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=3.15 KB/3228 for 0823cc1fab0ce53b4677252ec2fad0f8 in 20ms, sequenceid=36, compaction requested=false 2024-11-15T22:39:59,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0823cc1fab0ce53b4677252ec2fad0f8: 2024-11-15T22:39:59,434 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.2 K, sizeToCheck=16.0 K 2024-11-15T22:39:59,434 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:39:59,434 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/1aae2db1cfed47bc9016e884c626b5bd because midkey is the same as first or last row 2024-11-15T22:40:00,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:00,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:01,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:01,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:01,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:01,435 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0823cc1fab0ce53b4677252ec2fad0f8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T22:40:01,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/c56a2fe7afec4c508708fc33c79e3ec7 is 1080, key is row0030/info:/1731710399414/Put/seqid=0 2024-11-15T22:40:01,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741839_1015 (size=12509) 2024-11-15T22:40:01,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741839_1015 (size=12509) 2024-11-15T22:40:01,446 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=46 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/c56a2fe7afec4c508708fc33c79e3ec7 2024-11-15T22:40:01,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/c56a2fe7afec4c508708fc33c79e3ec7 as 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/c56a2fe7afec4c508708fc33c79e3ec7 2024-11-15T22:40:01,457 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/c56a2fe7afec4c508708fc33c79e3ec7, entries=7, sequenceid=46, filesize=12.2 K 2024-11-15T22:40:01,458 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 0823cc1fab0ce53b4677252ec2fad0f8 in 23ms, sequenceid=46, compaction requested=true 2024-11-15T22:40:01,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0823cc1fab0ce53b4677252ec2fad0f8: 2024-11-15T22:40:01,458 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=52.4 K, sizeToCheck=16.0 K 2024-11-15T22:40:01,458 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:40:01,459 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/1aae2db1cfed47bc9016e884c626b5bd because midkey is the same as first or last row 2024-11-15T22:40:01,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0823cc1fab0ce53b4677252ec2fad0f8:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T22:40:01,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:01,459 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T22:40:01,460 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 53702 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T22:40:01,460 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1541): 0823cc1fab0ce53b4677252ec2fad0f8/info is initiating minor compaction (all files) 2024-11-15T22:40:01,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:01,460 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0823cc1fab0ce53b4677252ec2fad0f8/info in TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 
2024-11-15T22:40:01,460 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0823cc1fab0ce53b4677252ec2fad0f8 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-15T22:40:01,461 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/31132d2348a54555893d106db1444ba8, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/1aae2db1cfed47bc9016e884c626b5bd, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/c56a2fe7afec4c508708fc33c79e3ec7] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp, totalSize=52.4 K 2024-11-15T22:40:01,461 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 31132d2348a54555893d106db1444ba8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731710399355 2024-11-15T22:40:01,462 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1aae2db1cfed47bc9016e884c626b5bd, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=36, earliestPutTs=1731710399369 2024-11-15T22:40:01,462 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting c56a2fe7afec4c508708fc33c79e3ec7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1731710399414 2024-11-15T22:40:01,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/e2b5bc9b39cb42ef9df2ddc66052eb3e is 1080, key is row0037/info:/1731710401437/Put/seqid=0 2024-11-15T22:40:01,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741840_1016 (size=17894) 2024-11-15T22:40:01,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741840_1016 (size=17894) 2024-11-15T22:40:01,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=61 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/e2b5bc9b39cb42ef9df2ddc66052eb3e 2024-11-15T22:40:01,479 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0823cc1fab0ce53b4677252ec2fad0f8#info#compaction#60 average throughput is 36.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T22:40:01,479 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/ac3142d5bd39491998efd850f0e44892 is 1080, key is row0001/info:/1731710399355/Put/seqid=0 2024-11-15T22:40:01,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/e2b5bc9b39cb42ef9df2ddc66052eb3e as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/e2b5bc9b39cb42ef9df2ddc66052eb3e 2024-11-15T22:40:01,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741841_1017 (size=43901) 2024-11-15T22:40:01,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741841_1017 (size=43901) 2024-11-15T22:40:01,488 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/e2b5bc9b39cb42ef9df2ddc66052eb3e, entries=12, sequenceid=61, filesize=17.5 K 2024-11-15T22:40:01,489 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=12.61 KB/12912 for 0823cc1fab0ce53b4677252ec2fad0f8 in 29ms, sequenceid=61, compaction requested=false 2024-11-15T22:40:01,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0823cc1fab0ce53b4677252ec2fad0f8: 2024-11-15T22:40:01,490 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.9 K, sizeToCheck=16.0 K 2024-11-15T22:40:01,490 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:40:01,490 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/1aae2db1cfed47bc9016e884c626b5bd because midkey is the same as first or last row 2024-11-15T22:40:01,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:01,490 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0823cc1fab0ce53b4677252ec2fad0f8 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-15T22:40:01,492 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/ac3142d5bd39491998efd850f0e44892 as 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ac3142d5bd39491998efd850f0e44892 2024-11-15T22:40:01,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/e6c703a7d81f409b906a5198232f3474 is 1080, key is row0049/info:/1731710401462/Put/seqid=0 2024-11-15T22:40:01,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741842_1018 (size=18987) 2024-11-15T22:40:01,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741842_1018 (size=18987) 2024-11-15T22:40:01,499 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0823cc1fab0ce53b4677252ec2fad0f8/info of 0823cc1fab0ce53b4677252ec2fad0f8 into ac3142d5bd39491998efd850f0e44892(size=42.9 K), total size for store is 60.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T22:40:01,499 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0823cc1fab0ce53b4677252ec2fad0f8: 2024-11-15T22:40:01,499 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8., storeName=0823cc1fab0ce53b4677252ec2fad0f8/info, priority=13, startTime=1731710401459; duration=0sec 2024-11-15T22:40:01,499 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-11-15T22:40:01,499 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:40:01,500 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ac3142d5bd39491998efd850f0e44892 because midkey is the same as first or last row 2024-11-15T22:40:01,500 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-11-15T22:40:01,500 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:40:01,500 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ac3142d5bd39491998efd850f0e44892 because midkey is the same as first or last row 2024-11-15T22:40:01,500 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-11-15T22:40:01,500 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] 
regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:40:01,500 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ac3142d5bd39491998efd850f0e44892 because midkey is the same as first or last row 2024-11-15T22:40:01,500 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:01,500 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0823cc1fab0ce53b4677252ec2fad0f8:info 2024-11-15T22:40:01,501 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/e6c703a7d81f409b906a5198232f3474 2024-11-15T22:40:01,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/e6c703a7d81f409b906a5198232f3474 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/e6c703a7d81f409b906a5198232f3474 2024-11-15T22:40:01,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/e6c703a7d81f409b906a5198232f3474, entries=13, sequenceid=77, filesize=18.5 K 2024-11-15T22:40:01,511 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=3.15 KB/3228 for 0823cc1fab0ce53b4677252ec2fad0f8 in 21ms, sequenceid=77, compaction requested=true 2024-11-15T22:40:01,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0823cc1fab0ce53b4677252ec2fad0f8: 2024-11-15T22:40:01,512 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.9 K, sizeToCheck=16.0 K 2024-11-15T22:40:01,512 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:40:01,512 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ac3142d5bd39491998efd850f0e44892 because midkey is the same as first or last row 2024-11-15T22:40:01,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0823cc1fab0ce53b4677252ec2fad0f8:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T22:40:01,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-11-15T22:40:01,512 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T22:40:01,513 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 80782 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T22:40:01,513 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1541): 0823cc1fab0ce53b4677252ec2fad0f8/info is initiating minor compaction (all files) 2024-11-15T22:40:01,513 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0823cc1fab0ce53b4677252ec2fad0f8/info in TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 2024-11-15T22:40:01,513 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ac3142d5bd39491998efd850f0e44892, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/e2b5bc9b39cb42ef9df2ddc66052eb3e, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/e6c703a7d81f409b906a5198232f3474] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp, totalSize=78.9 K 2024-11-15T22:40:01,513 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting ac3142d5bd39491998efd850f0e44892, keycount=36, bloomtype=ROW, size=42.9 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1731710399355 2024-11-15T22:40:01,514 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting e2b5bc9b39cb42ef9df2ddc66052eb3e, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=61, earliestPutTs=1731710401437 2024-11-15T22:40:01,514 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting e6c703a7d81f409b906a5198232f3474, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1731710401462 2024-11-15T22:40:01,525 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0823cc1fab0ce53b4677252ec2fad0f8#info#compaction#62 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T22:40:01,526 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/ef2f20102d714cef8e2adc2d302c1fb9 is 1080, key is row0001/info:/1731710399355/Put/seqid=0 2024-11-15T22:40:01,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741843_1019 (size=71001) 2024-11-15T22:40:01,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741843_1019 (size=71001) 2024-11-15T22:40:01,535 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/ef2f20102d714cef8e2adc2d302c1fb9 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ef2f20102d714cef8e2adc2d302c1fb9 2024-11-15T22:40:01,542 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0823cc1fab0ce53b4677252ec2fad0f8/info of 0823cc1fab0ce53b4677252ec2fad0f8 into ef2f20102d714cef8e2adc2d302c1fb9(size=69.3 K), total size for store is 69.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T22:40:01,542 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0823cc1fab0ce53b4677252ec2fad0f8: 2024-11-15T22:40:01,542 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8., storeName=0823cc1fab0ce53b4677252ec2fad0f8/info, priority=13, startTime=1731710401512; duration=0sec 2024-11-15T22:40:01,542 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.3 K, sizeToCheck=16.0 K 2024-11-15T22:40:01,542 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:40:01,542 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ef2f20102d714cef8e2adc2d302c1fb9 because midkey is the same as first or last row 2024-11-15T22:40:01,542 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.3 K, sizeToCheck=16.0 K 2024-11-15T22:40:01,542 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:40:01,542 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ef2f20102d714cef8e2adc2d302c1fb9 because midkey is the same as first or last row 2024-11-15T22:40:01,542 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.3 K, sizeToCheck=16.0 K 2024-11-15T22:40:01,542 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:40:01,542 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ef2f20102d714cef8e2adc2d302c1fb9 because midkey is the same as first or last row 2024-11-15T22:40:01,542 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:01,542 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0823cc1fab0ce53b4677252ec2fad0f8:info 2024-11-15T22:40:02,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:02,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:03,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:03,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:03,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:03,511 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0823cc1fab0ce53b4677252ec2fad0f8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T22:40:03,516 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/d1fe7748cec742f7b765f47b7afae07e is 1080, key is row0062/info:/1731710401492/Put/seqid=0 2024-11-15T22:40:03,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741844_1020 (size=12509) 2024-11-15T22:40:03,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741844_1020 (size=12509) 2024-11-15T22:40:03,526 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/d1fe7748cec742f7b765f47b7afae07e 2024-11-15T22:40:03,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/d1fe7748cec742f7b765f47b7afae07e as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/d1fe7748cec742f7b765f47b7afae07e 2024-11-15T22:40:03,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/d1fe7748cec742f7b765f47b7afae07e, entries=7, sequenceid=89, filesize=12.2 K 2024-11-15T22:40:03,538 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 0823cc1fab0ce53b4677252ec2fad0f8 in 28ms, sequenceid=89, compaction requested=false 2024-11-15T22:40:03,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0823cc1fab0ce53b4677252ec2fad0f8: 2024-11-15T22:40:03,539 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=81.6 K, sizeToCheck=16.0 K 2024-11-15T22:40:03,539 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:40:03,539 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ef2f20102d714cef8e2adc2d302c1fb9 because midkey is the same as first or last row 2024-11-15T22:40:03,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:03,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0823cc1fab0ce53b4677252ec2fad0f8 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-15T22:40:03,545 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/9804380c62cc40f9bc14b76dad4ff83b is 1080, key is row0069/info:/1731710403512/Put/seqid=0 2024-11-15T22:40:03,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741845_1021 (size=20064) 2024-11-15T22:40:03,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741845_1021 (size=20064) 2024-11-15T22:40:03,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=106 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/9804380c62cc40f9bc14b76dad4ff83b 2024-11-15T22:40:03,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/9804380c62cc40f9bc14b76dad4ff83b as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/9804380c62cc40f9bc14b76dad4ff83b 2024-11-15T22:40:03,560 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/9804380c62cc40f9bc14b76dad4ff83b, entries=14, sequenceid=106, filesize=19.6 K 2024-11-15T22:40:03,561 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 0823cc1fab0ce53b4677252ec2fad0f8 in 21ms, sequenceid=106, compaction requested=true 2024-11-15T22:40:03,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0823cc1fab0ce53b4677252ec2fad0f8: 2024-11-15T22:40:03,561 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=101.1 K, sizeToCheck=16.0 K 2024-11-15T22:40:03,561 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:40:03,561 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ef2f20102d714cef8e2adc2d302c1fb9 because midkey is the same as first or last row 2024-11-15T22:40:03,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0823cc1fab0ce53b4677252ec2fad0f8:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T22:40:03,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:03,561 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T22:40:03,562 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103574 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T22:40:03,562 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1541): 0823cc1fab0ce53b4677252ec2fad0f8/info is initiating minor compaction (all files) 2024-11-15T22:40:03,562 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0823cc1fab0ce53b4677252ec2fad0f8/info in TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 2024-11-15T22:40:03,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:03,563 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ef2f20102d714cef8e2adc2d302c1fb9, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/d1fe7748cec742f7b765f47b7afae07e, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/9804380c62cc40f9bc14b76dad4ff83b] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp, totalSize=101.1 K 2024-11-15T22:40:03,563 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0823cc1fab0ce53b4677252ec2fad0f8 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-15T22:40:03,563 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting ef2f20102d714cef8e2adc2d302c1fb9, keycount=61, bloomtype=ROW, size=69.3 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1731710399355 2024-11-15T22:40:03,563 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting d1fe7748cec742f7b765f47b7afae07e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1731710401492 2024-11-15T22:40:03,564 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9804380c62cc40f9bc14b76dad4ff83b, keycount=14, bloomtype=ROW, size=19.6 
K, encoding=NONE, compression=NONE, seqNum=106, earliestPutTs=1731710403512 2024-11-15T22:40:03,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/3ac28241d6964bac9680183ab346c57e is 1080, key is row0083/info:/1731710403541/Put/seqid=0 2024-11-15T22:40:03,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741846_1022 (size=17894) 2024-11-15T22:40:03,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741846_1022 (size=17894) 2024-11-15T22:40:03,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/3ac28241d6964bac9680183ab346c57e 2024-11-15T22:40:03,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/3ac28241d6964bac9680183ab346c57e as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/3ac28241d6964bac9680183ab346c57e 2024-11-15T22:40:03,576 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0823cc1fab0ce53b4677252ec2fad0f8#info#compaction#66 average throughput is 28.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T22:40:03,577 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa is 1080, key is row0001/info:/1731710399355/Put/seqid=0 2024-11-15T22:40:03,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741847_1023 (size=93793) 2024-11-15T22:40:03,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741847_1023 (size=93793) 2024-11-15T22:40:03,582 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/3ac28241d6964bac9680183ab346c57e, entries=12, sequenceid=121, filesize=17.5 K 2024-11-15T22:40:03,583 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=2.10 KB/2152 for 0823cc1fab0ce53b4677252ec2fad0f8 in 19ms, sequenceid=121, compaction requested=false 2024-11-15T22:40:03,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0823cc1fab0ce53b4677252ec2fad0f8: 2024-11-15T22:40:03,583 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=118.6 K, sizeToCheck=16.0 K 2024-11-15T22:40:03,583 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:40:03,583 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ef2f20102d714cef8e2adc2d302c1fb9 because midkey is the same as first or last row 2024-11-15T22:40:03,586 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa 2024-11-15T22:40:03,592 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0823cc1fab0ce53b4677252ec2fad0f8/info of 0823cc1fab0ce53b4677252ec2fad0f8 into c5fbfa3bd0a9472cac09bbc8fe4d4daa(size=91.6 K), total size for store is 109.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-15T22:40:03,592 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0823cc1fab0ce53b4677252ec2fad0f8: 2024-11-15T22:40:03,593 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8., storeName=0823cc1fab0ce53b4677252ec2fad0f8/info, priority=13, startTime=1731710403561; duration=0sec 2024-11-15T22:40:03,593 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=109.1 K, sizeToCheck=16.0 K 2024-11-15T22:40:03,593 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:40:03,593 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=109.1 K, sizeToCheck=16.0 K 2024-11-15T22:40:03,593 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:40:03,593 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=109.1 K, sizeToCheck=16.0 K 2024-11-15T22:40:03,593 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T22:40:03,594 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:03,594 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:03,594 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0823cc1fab0ce53b4677252ec2fad0f8:info 2024-11-15T22:40:03,595 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45633 {}] assignment.AssignmentManager(1355): Split request from e611192d6313,42145,1731710388091, parent={ENCODED => 0823cc1fab0ce53b4677252ec2fad0f8, NAME => 'TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-15T22:40:03,600 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45633 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=e611192d6313,42145,1731710388091 2024-11-15T22:40:03,604 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45633 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=0823cc1fab0ce53b4677252ec2fad0f8, daughterA=6aa73352ad577c320cd9915f5ef98912, daughterB=af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:03,606 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=0823cc1fab0ce53b4677252ec2fad0f8, 
daughterA=6aa73352ad577c320cd9915f5ef98912, daughterB=af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:03,606 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=0823cc1fab0ce53b4677252ec2fad0f8, daughterA=6aa73352ad577c320cd9915f5ef98912, daughterB=af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:03,606 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=0823cc1fab0ce53b4677252ec2fad0f8, daughterA=6aa73352ad577c320cd9915f5ef98912, daughterB=af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:03,613 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0823cc1fab0ce53b4677252ec2fad0f8, UNASSIGN}] 2024-11-15T22:40:03,614 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0823cc1fab0ce53b4677252ec2fad0f8, UNASSIGN 2024-11-15T22:40:03,615 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=0823cc1fab0ce53b4677252ec2fad0f8, regionState=CLOSING, regionLocation=e611192d6313,42145,1731710388091 2024-11-15T22:40:03,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0823cc1fab0ce53b4677252ec2fad0f8, UNASSIGN because future has completed 2024-11-15T22:40:03,618 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-15T22:40:03,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0823cc1fab0ce53b4677252ec2fad0f8, server=e611192d6313,42145,1731710388091}] 2024-11-15T22:40:03,776 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:03,777 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-15T22:40:03,778 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 0823cc1fab0ce53b4677252ec2fad0f8, disabling compactions & flushes 2024-11-15T22:40:03,778 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 2024-11-15T22:40:03,778 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 
2024-11-15T22:40:03,778 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. after waiting 0 ms 2024-11-15T22:40:03,778 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 2024-11-15T22:40:03,778 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 0823cc1fab0ce53b4677252ec2fad0f8 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB 2024-11-15T22:40:03,785 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/bb4306ce97c54449b05507362ffabd06 is 1080, key is row0095/info:/1731710403564/Put/seqid=0 2024-11-15T22:40:03,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741848_1024 (size=7112) 2024-11-15T22:40:03,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741848_1024 (size=7112) 2024-11-15T22:40:03,791 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/bb4306ce97c54449b05507362ffabd06 2024-11-15T22:40:03,798 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/.tmp/info/bb4306ce97c54449b05507362ffabd06 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/bb4306ce97c54449b05507362ffabd06 2024-11-15T22:40:03,805 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/bb4306ce97c54449b05507362ffabd06, entries=2, sequenceid=127, filesize=6.9 K 2024-11-15T22:40:03,807 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 0823cc1fab0ce53b4677252ec2fad0f8 in 29ms, sequenceid=127, compaction requested=true 2024-11-15T22:40:03,808 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/31132d2348a54555893d106db1444ba8, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/1aae2db1cfed47bc9016e884c626b5bd, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ac3142d5bd39491998efd850f0e44892, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/c56a2fe7afec4c508708fc33c79e3ec7, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/e2b5bc9b39cb42ef9df2ddc66052eb3e, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ef2f20102d714cef8e2adc2d302c1fb9, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/e6c703a7d81f409b906a5198232f3474, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/d1fe7748cec742f7b765f47b7afae07e, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/9804380c62cc40f9bc14b76dad4ff83b] to archive 2024-11-15T22:40:03,810 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
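[Annotation] The HFileArchiver entries that follow move the parent region's already-compacted store files out of the active data directory and into the archive tree before the region closes for good. The small sketch below only illustrates the data/ to archive/ path convention visible in those entries; it is not the HFileArchiver implementation, and the root directory is copied from the log for demonstration.

import org.apache.hadoop.fs.Path;

public class ArchivePathDemo {
  // A store file under <root>/data/<ns>/<table>/<region>/<cf>/ is relocated to
  // <root>/archive/data/<ns>/<table>/<region>/<cf>/, as the log entries below show.
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);   // strip "<rootDir>/"
    return new Path(new Path(rootDir, "archive"), relative);   // prepend "archive/"
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789");
    Path hfile = new Path(root,
        "data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/31132d2348a54555893d106db1444ba8");
    System.out.println(toArchivePath(root, hfile));            // matches the archive path logged below
  }
}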
2024-11-15T22:40:03,812 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/31132d2348a54555893d106db1444ba8 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/31132d2348a54555893d106db1444ba8 2024-11-15T22:40:03,813 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/1aae2db1cfed47bc9016e884c626b5bd to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/1aae2db1cfed47bc9016e884c626b5bd 2024-11-15T22:40:03,815 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ac3142d5bd39491998efd850f0e44892 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ac3142d5bd39491998efd850f0e44892 2024-11-15T22:40:03,816 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/c56a2fe7afec4c508708fc33c79e3ec7 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/c56a2fe7afec4c508708fc33c79e3ec7 2024-11-15T22:40:03,818 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/e2b5bc9b39cb42ef9df2ddc66052eb3e to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/e2b5bc9b39cb42ef9df2ddc66052eb3e 2024-11-15T22:40:03,819 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ef2f20102d714cef8e2adc2d302c1fb9 to 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/ef2f20102d714cef8e2adc2d302c1fb9 2024-11-15T22:40:03,820 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/e6c703a7d81f409b906a5198232f3474 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/e6c703a7d81f409b906a5198232f3474 2024-11-15T22:40:03,822 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/d1fe7748cec742f7b765f47b7afae07e to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/d1fe7748cec742f7b765f47b7afae07e 2024-11-15T22:40:03,823 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/9804380c62cc40f9bc14b76dad4ff83b to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/9804380c62cc40f9bc14b76dad4ff83b 2024-11-15T22:40:03,829 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 2024-11-15T22:40:03,829 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 2024-11-15T22:40:03,830 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 0823cc1fab0ce53b4677252ec2fad0f8: Waiting for close lock at 1731710403778Running coprocessor pre-close hooks at 1731710403778Disabling compacts and flushes for region at 1731710403778Disabling writes for close at 1731710403778Obtaining lock to block concurrent updates at 1731710403778Preparing flush snapshotting stores in 0823cc1fab0ce53b4677252ec2fad0f8 at 1731710403778Finished memstore snapshotting TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8., syncing WAL and waiting on mvcc, flushsize=dataSize=2152, getHeapSize=2544, getOffHeapSize=0, getCellsCount=2 at 1731710403779 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 
at 1731710403780 (+1 ms)Flushing 0823cc1fab0ce53b4677252ec2fad0f8/info: creating writer at 1731710403780Flushing 0823cc1fab0ce53b4677252ec2fad0f8/info: appending metadata at 1731710403784 (+4 ms)Flushing 0823cc1fab0ce53b4677252ec2fad0f8/info: closing flushed file at 1731710403784Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5798d98c: reopening flushed file at 1731710403797 (+13 ms)Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 0823cc1fab0ce53b4677252ec2fad0f8 in 29ms, sequenceid=127, compaction requested=true at 1731710403807 (+10 ms)Writing region close event to WAL at 1731710403825 (+18 ms)Running coprocessor post-close hooks at 1731710403829 (+4 ms)Closed at 1731710403829 2024-11-15T22:40:03,832 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:03,832 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=0823cc1fab0ce53b4677252ec2fad0f8, regionState=CLOSED 2024-11-15T22:40:03,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0823cc1fab0ce53b4677252ec2fad0f8, server=e611192d6313,42145,1731710388091 because future has completed 2024-11-15T22:40:03,837 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-15T22:40:03,837 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 0823cc1fab0ce53b4677252ec2fad0f8, server=e611192d6313,42145,1731710388091 in 217 msec 2024-11-15T22:40:03,839 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-15T22:40:03,839 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0823cc1fab0ce53b4677252ec2fad0f8, UNASSIGN in 224 msec 2024-11-15T22:40:03,845 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:03,848 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=0823cc1fab0ce53b4677252ec2fad0f8, threads=3 2024-11-15T22:40:03,850 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa for region: 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:03,850 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/3ac28241d6964bac9680183ab346c57e for region: 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:03,850 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/bb4306ce97c54449b05507362ffabd06 for region: 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:03,861 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/3ac28241d6964bac9680183ab346c57e, top=true 2024-11-15T22:40:03,861 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/bb4306ce97c54449b05507362ffabd06, top=true 2024-11-15T22:40:03,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741849_1025 (size=27) 2024-11-15T22:40:03,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741849_1025 (size=27) 2024-11-15T22:40:03,873 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-bb4306ce97c54449b05507362ffabd06 for child: af7145e43a48f7b93cf9bedd838f7d2c, parent: 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:03,873 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-3ac28241d6964bac9680183ab346c57e for child: af7145e43a48f7b93cf9bedd838f7d2c, parent: 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:03,873 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/3ac28241d6964bac9680183ab346c57e for region: 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:03,873 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/bb4306ce97c54449b05507362ffabd06 for region: 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:03,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741850_1026 (size=27) 2024-11-15T22:40:03,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741850_1026 (size=27) 2024-11-15T22:40:03,876 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa for region: 0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:03,877 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 0823cc1fab0ce53b4677252ec2fad0f8 Daughter A: [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/6aa73352ad577c320cd9915f5ef98912/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa.0823cc1fab0ce53b4677252ec2fad0f8] storefiles, Daughter B: [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-3ac28241d6964bac9680183ab346c57e, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-bb4306ce97c54449b05507362ffabd06, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa.0823cc1fab0ce53b4677252ec2fad0f8] storefiles. 2024-11-15T22:40:03,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741851_1027 (size=71) 2024-11-15T22:40:03,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741851_1027 (size=71) 2024-11-15T22:40:03,887 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:03,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741852_1028 (size=71) 2024-11-15T22:40:03,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741852_1028 (size=71) 2024-11-15T22:40:03,900 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:03,910 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/6aa73352ad577c320cd9915f5ef98912/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-15T22:40:03,912 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-15T22:40:03,915 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731710403914"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731710403914"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731710403914"}]},"ts":"1731710403914"} 2024-11-15T22:40:03,915 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731710403914"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731710403914"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731710403914"}]},"ts":"1731710403914"} 2024-11-15T22:40:03,915 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731710403914"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731710403914"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731710403914"}]},"ts":"1731710403914"} 2024-11-15T22:40:03,930 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6aa73352ad577c320cd9915f5ef98912, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=af7145e43a48f7b93cf9bedd838f7d2c, ASSIGN}] 2024-11-15T22:40:03,931 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=af7145e43a48f7b93cf9bedd838f7d2c, ASSIGN 2024-11-15T22:40:03,931 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6aa73352ad577c320cd9915f5ef98912, ASSIGN 2024-11-15T22:40:03,932 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=af7145e43a48f7b93cf9bedd838f7d2c, ASSIGN; state=SPLITTING_NEW, location=e611192d6313,42145,1731710388091; forceNewPlan=false, retain=false 2024-11-15T22:40:03,932 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6aa73352ad577c320cd9915f5ef98912, ASSIGN; state=SPLITTING_NEW, location=e611192d6313,42145,1731710388091; forceNewPlan=false, retain=false 2024-11-15T22:40:04,083 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=6aa73352ad577c320cd9915f5ef98912, regionState=OPENING, regionLocation=e611192d6313,42145,1731710388091 2024-11-15T22:40:04,083 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta 
row=af7145e43a48f7b93cf9bedd838f7d2c, regionState=OPENING, regionLocation=e611192d6313,42145,1731710388091 2024-11-15T22:40:04,086 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=af7145e43a48f7b93cf9bedd838f7d2c, ASSIGN because future has completed 2024-11-15T22:40:04,087 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure af7145e43a48f7b93cf9bedd838f7d2c, server=e611192d6313,42145,1731710388091}] 2024-11-15T22:40:04,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6aa73352ad577c320cd9915f5ef98912, ASSIGN because future has completed 2024-11-15T22:40:04,089 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6aa73352ad577c320cd9915f5ef98912, server=e611192d6313,42145,1731710388091}] 2024-11-15T22:40:04,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:04,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:04,251 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. 
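[Annotation] The two WARN stack traces above come from WAL close/lease recovery against an earlier mini-cluster (note the different NameNode port, 41079 versus 40299 used elsewhere) whose DFSClient has already been shut down, hence the "java.io.IOException: Filesystem closed" root cause. For orientation, the HDFS pattern that RecoverLeaseFSUtils wraps is recoverLease plus isFileClosed polling; the sketch below is a rough, simplified version of that pattern, with the real utility's timeout, backoff and reflection guards omitted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Rough sketch only; not the actual RecoverLeaseFSUtils logic.
  static boolean recoverLease(Configuration conf, Path walFile) throws Exception {
    FileSystem fs = walFile.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true;                                   // nothing to recover on a local filesystem
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    for (int attempt = 0; attempt < 60; attempt++) {
      if (dfs.recoverLease(walFile)) {               // ask the NameNode to start lease recovery
        return true;
      }
      if (dfs.isFileClosed(walFile)) {               // last block already finalized
        return true;
      }
      Thread.sleep(1000);                            // wait before polling again
    }
    return false;
  }
}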
2024-11-15T22:40:04,251 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => af7145e43a48f7b93cf9bedd838f7d2c, NAME => 'TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-15T22:40:04,252 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:04,252 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:40:04,252 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:04,252 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:04,253 INFO [StoreOpener-af7145e43a48f7b93cf9bedd838f7d2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:04,254 INFO [StoreOpener-af7145e43a48f7b93cf9bedd838f7d2c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region af7145e43a48f7b93cf9bedd838f7d2c columnFamilyName info 2024-11-15T22:40:04,254 DEBUG [StoreOpener-af7145e43a48f7b93cf9bedd838f7d2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:04,262 DEBUG [StoreOpener-af7145e43a48f7b93cf9bedd838f7d2c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-3ac28241d6964bac9680183ab346c57e 2024-11-15T22:40:04,266 DEBUG [StoreOpener-af7145e43a48f7b93cf9bedd838f7d2c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-bb4306ce97c54449b05507362ffabd06 
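[Annotation] When daughter region af7145e43a48f7b93cf9bedd838f7d2c opens, its store does not hold copies of the parent's data: it loads link entries named "<table>=<parent encoded region>-<hfile>" that point back at the parent's HFiles, plus a "<hfile>.<parent encoded region>" reference for the file that straddles the split point (the "-top" entry in the next line). The snippet below merely pulls the three components out of such a link name as seen in the log; it is an illustration of the naming pattern, not HBase's HFileLink parser.

public class LinkFileNameDemo {
  // Splits "<table>=<parentEncodedRegion>-<hfileName>" into its parts (illustrative only).
  static String[] parse(String linkFileName) {
    int eq = linkFileName.indexOf('=');
    int dash = linkFileName.indexOf('-', eq + 1);
    return new String[] {
        linkFileName.substring(0, eq),            // referenced table
        linkFileName.substring(eq + 1, dash),     // parent region's encoded name
        linkFileName.substring(dash + 1)          // referenced HFile
    };
  }

  public static void main(String[] args) {
    String name = "TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-bb4306ce97c54449b05507362ffabd06";
    String[] parts = parse(name);
    System.out.println(parts[0] + " / " + parts[1] + " / " + parts[2]);
  }
}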
2024-11-15T22:40:04,274 DEBUG [StoreOpener-af7145e43a48f7b93cf9bedd838f7d2c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa.0823cc1fab0ce53b4677252ec2fad0f8->hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa-top 2024-11-15T22:40:04,275 INFO [StoreOpener-af7145e43a48f7b93cf9bedd838f7d2c-1 {}] regionserver.HStore(327): Store=af7145e43a48f7b93cf9bedd838f7d2c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:40:04,275 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:04,276 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:04,277 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:04,278 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:04,278 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:04,279 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:04,280 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened af7145e43a48f7b93cf9bedd838f7d2c; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=712880, jitterRate=-0.09352737665176392}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T22:40:04,280 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:04,281 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for af7145e43a48f7b93cf9bedd838f7d2c: Running coprocessor pre-open hook at 1731710404252Writing region info on filesystem at 1731710404252Initializing all the Stores at 1731710404253 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710404253Cleaning up temporary data from old regions at 1731710404278 (+25 ms)Running coprocessor post-open hooks at 1731710404280 (+2 ms)Region opened successfully at 1731710404280 2024-11-15T22:40:04,281 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c., pid=12, masterSystemTime=1731710404241 2024-11-15T22:40:04,282 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store af7145e43a48f7b93cf9bedd838f7d2c:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T22:40:04,282 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:04,282 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T22:40:04,283 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. 2024-11-15T22:40:04,283 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1541): af7145e43a48f7b93cf9bedd838f7d2c/info is initiating minor compaction (all files) 2024-11-15T22:40:04,283 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of af7145e43a48f7b93cf9bedd838f7d2c/info in TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. 
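[Annotation] Opening a freshly split daughter immediately queues a compaction of the inherited parent files so the daughter can rewrite them into its own store files and drop the references; that is what the "recently split daughter region" priority override above reflects. A compaction of a specific column family can also be requested from a client. Minimal sketch, with the connection setup assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
      // Queue a compaction of the "info" family across the table's regions; the region
      // server applies the same selection policy logged above.
      admin.compact(table, Bytes.toBytes("info"));
    }
  }
}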
2024-11-15T22:40:04,283 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa.0823cc1fab0ce53b4677252ec2fad0f8->hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa-top, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-3ac28241d6964bac9680183ab346c57e, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-bb4306ce97c54449b05507362ffabd06] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp, totalSize=116.0 K 2024-11-15T22:40:04,284 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. 2024-11-15T22:40:04,284 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. 2024-11-15T22:40:04,284 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912. 
2024-11-15T22:40:04,284 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting c5fbfa3bd0a9472cac09bbc8fe4d4daa.0823cc1fab0ce53b4677252ec2fad0f8, keycount=41, bloomtype=ROW, size=91.6 K, encoding=NONE, compression=NONE, seqNum=107, earliestPutTs=1731710399355 2024-11-15T22:40:04,284 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 6aa73352ad577c320cd9915f5ef98912, NAME => 'TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-15T22:40:04,284 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-3ac28241d6964bac9680183ab346c57e, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1731710403541 2024-11-15T22:40:04,284 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 6aa73352ad577c320cd9915f5ef98912 2024-11-15T22:40:04,284 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:40:04,284 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 6aa73352ad577c320cd9915f5ef98912 2024-11-15T22:40:04,285 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 6aa73352ad577c320cd9915f5ef98912 2024-11-15T22:40:04,285 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=af7145e43a48f7b93cf9bedd838f7d2c, regionState=OPEN, openSeqNum=131, regionLocation=e611192d6313,42145,1731710388091 2024-11-15T22:40:04,285 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-bb4306ce97c54449b05507362ffabd06, keycount=2, bloomtype=ROW, size=6.9 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731710403564 2024-11-15T22:40:04,285 INFO [StoreOpener-6aa73352ad577c320cd9915f5ef98912-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6aa73352ad577c320cd9915f5ef98912 2024-11-15T22:40:04,286 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-15T22:40:04,286 INFO [StoreOpener-6aa73352ad577c320cd9915f5ef98912-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction 
policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6aa73352ad577c320cd9915f5ef98912 columnFamilyName info 2024-11-15T22:40:04,286 DEBUG [StoreOpener-6aa73352ad577c320cd9915f5ef98912-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:04,287 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-11-15T22:40:04,287 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-15T22:40:04,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure af7145e43a48f7b93cf9bedd838f7d2c, server=e611192d6313,42145,1731710388091 because future has completed 2024-11-15T22:40:04,291 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-15T22:40:04,292 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure af7145e43a48f7b93cf9bedd838f7d2c, server=e611192d6313,42145,1731710388091 in 201 msec 2024-11-15T22:40:04,294 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=af7145e43a48f7b93cf9bedd838f7d2c, ASSIGN in 361 msec 2024-11-15T22:40:04,300 DEBUG [StoreOpener-6aa73352ad577c320cd9915f5ef98912-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/6aa73352ad577c320cd9915f5ef98912/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa.0823cc1fab0ce53b4677252ec2fad0f8->hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa-bottom 2024-11-15T22:40:04,300 INFO [StoreOpener-6aa73352ad577c320cd9915f5ef98912-1 {}] regionserver.HStore(327): Store=6aa73352ad577c320cd9915f5ef98912/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:40:04,300 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 6aa73352ad577c320cd9915f5ef98912 2024-11-15T22:40:04,301 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/6aa73352ad577c320cd9915f5ef98912 2024-11-15T22:40:04,302 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/6aa73352ad577c320cd9915f5ef98912 
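[Annotation] Interleaved with the daughter opens, MemStoreFlusher flushes hbase:meta (region 1588230740), persisting the regioninfo/splitA/splitB rows written by the split (the flushed info file's largest cell, shown further below, is the regioninfo entry for af7145e43a48f7b93cf9bedd838f7d2c). A flush of a table's memstores can likewise be requested explicitly via the Admin API; minimal sketch, with connection setup assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush all memstores of hbase:meta to HFiles, as MemStoreFlusher does above.
      admin.flush(TableName.valueOf("hbase:meta"));
    }
  }
}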
2024-11-15T22:40:04,302 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 6aa73352ad577c320cd9915f5ef98912 2024-11-15T22:40:04,302 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 6aa73352ad577c320cd9915f5ef98912 2024-11-15T22:40:04,304 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 6aa73352ad577c320cd9915f5ef98912 2024-11-15T22:40:04,305 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 6aa73352ad577c320cd9915f5ef98912; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=831067, jitterRate=0.056756868958473206}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T22:40:04,305 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6aa73352ad577c320cd9915f5ef98912 2024-11-15T22:40:04,305 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 6aa73352ad577c320cd9915f5ef98912: Running coprocessor pre-open hook at 1731710404285Writing region info on filesystem at 1731710404285Initializing all the Stores at 1731710404285Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710404285Cleaning up temporary data from old regions at 1731710404302 (+17 ms)Running coprocessor post-open hooks at 1731710404305 (+3 ms)Region opened successfully at 1731710404305 2024-11-15T22:40:04,306 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912., pid=13, masterSystemTime=1731710404241 2024-11-15T22:40:04,306 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 6aa73352ad577c320cd9915f5ef98912:info, priority=-2147483648, current under compaction store size is 2 2024-11-15T22:40:04,306 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:04,306 DEBUG [RS:0;e611192d6313:42145-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-15T22:40:04,307 INFO [RS:0;e611192d6313:42145-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912. 
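[Annotation] At this point both daughters are open (OpenRegionProcedure pid=12 and pid=13 finish shortly after), and the parent 0823cc1fab0ce53b4677252ec2fad0f8 has been closed and recorded in hbase:meta with splitA/splitB pointers. A quick way to confirm the outcome from a client is to list the table's regions and check the start/end keys, which should now be "" to "row0062" and "row0062" to "". Sketch below, assuming a standard client configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class ListDaughters {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // After the split completes, only the two daughter regions should be listed.
      for (RegionInfo ri : admin.getRegions(TableName.valueOf("TestLogRolling-testLogRolling"))) {
        System.out.println(ri.getEncodedName()
            + " start=" + Bytes.toStringBinary(ri.getStartKey())
            + " end=" + Bytes.toStringBinary(ri.getEndKey()));
      }
    }
  }
}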
2024-11-15T22:40:04,307 DEBUG [RS:0;e611192d6313:42145-longCompactions-0 {}] regionserver.HStore(1541): 6aa73352ad577c320cd9915f5ef98912/info is initiating minor compaction (all files)
2024-11-15T22:40:04,307 INFO [RS:0;e611192d6313:42145-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6aa73352ad577c320cd9915f5ef98912/info in TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912.
2024-11-15T22:40:04,307 INFO [RS:0;e611192d6313:42145-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/6aa73352ad577c320cd9915f5ef98912/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa.0823cc1fab0ce53b4677252ec2fad0f8->hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa-bottom] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/6aa73352ad577c320cd9915f5ef98912/.tmp, totalSize=91.6 K
2024-11-15T22:40:04,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/.tmp/info/3138cfb07ded45ca8c2001bb4fac69e4 is 193, key is TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c./info:regioninfo/1731710404284/Put/seqid=0
2024-11-15T22:40:04,308 DEBUG [RS:0;e611192d6313:42145-longCompactions-0 {}] compactions.Compactor(225): Compacting c5fbfa3bd0a9472cac09bbc8fe4d4daa.0823cc1fab0ce53b4677252ec2fad0f8, keycount=41, bloomtype=ROW, size=91.6 K, encoding=NONE, compression=NONE, seqNum=106, earliestPutTs=1731710399355
2024-11-15T22:40:04,308 DEBUG [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912.
2024-11-15T22:40:04,308 INFO [RS_OPEN_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912.
2024-11-15T22:40:04,309 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=6aa73352ad577c320cd9915f5ef98912, regionState=OPEN, openSeqNum=131, regionLocation=e611192d6313,42145,1731710388091
2024-11-15T22:40:04,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6aa73352ad577c320cd9915f5ef98912, server=e611192d6313,42145,1731710388091 because future has completed
2024-11-15T22:40:04,315 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10
2024-11-15T22:40:04,315 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af7145e43a48f7b93cf9bedd838f7d2c#info#compaction#69 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-15T22:40:04,315 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 6aa73352ad577c320cd9915f5ef98912, server=e611192d6313,42145,1731710388091 in 224 msec
2024-11-15T22:40:04,316 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/3f12f94d498e4cf99ad7c3d8e373bce4 is 1080, key is row0062/info:/1731710401492/Put/seqid=0
2024-11-15T22:40:04,319 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7
2024-11-15T22:40:04,319 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6aa73352ad577c320cd9915f5ef98912, ASSIGN in 385 msec
2024-11-15T22:40:04,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=0823cc1fab0ce53b4677252ec2fad0f8, daughterA=6aa73352ad577c320cd9915f5ef98912, daughterB=af7145e43a48f7b93cf9bedd838f7d2c in 719 msec
2024-11-15T22:40:04,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741853_1029 (size=9882)
2024-11-15T22:40:04,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741853_1029 (size=9882)
2024-11-15T22:40:04,322 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/.tmp/info/3138cfb07ded45ca8c2001bb4fac69e4
2024-11-15T22:40:04,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741854_1030 (size=42984)
2024-11-15T22:40:04,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741854_1030 (size=42984)
2024-11-15T22:40:04,331 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/3f12f94d498e4cf99ad7c3d8e373bce4 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/3f12f94d498e4cf99ad7c3d8e373bce4
2024-11-15T22:40:04,333 INFO [RS:0;e611192d6313:42145-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aa73352ad577c320cd9915f5ef98912#info#compaction#70 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-15T22:40:04,333 DEBUG [RS:0;e611192d6313:42145-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/6aa73352ad577c320cd9915f5ef98912/.tmp/info/5fa0295c9e3941d09e1022f8f0d7043c is 1080, key is row0001/info:/1731710399355/Put/seqid=0
2024-11-15T22:40:04,340 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in af7145e43a48f7b93cf9bedd838f7d2c/info of af7145e43a48f7b93cf9bedd838f7d2c into 3f12f94d498e4cf99ad7c3d8e373bce4(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-15T22:40:04,340 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for af7145e43a48f7b93cf9bedd838f7d2c:
2024-11-15T22:40:04,340 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c., storeName=af7145e43a48f7b93cf9bedd838f7d2c/info, priority=13, startTime=1731710404282; duration=0sec
2024-11-15T22:40:04,341 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-15T22:40:04,341 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af7145e43a48f7b93cf9bedd838f7d2c:info
2024-11-15T22:40:04,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/.tmp/ns/04bf0738941c4b93afffc0d4f975e6a7 is 43, key is default/ns:d/1731710389129/Put/seqid=0
2024-11-15T22:40:04,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741855_1031 (size=70862)
2024-11-15T22:40:04,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741855_1031 (size=70862)
2024-11-15T22:40:04,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741856_1032 (size=5153)
2024-11-15T22:40:04,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741856_1032 (size=5153)
2024-11-15T22:40:04,349 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/.tmp/ns/04bf0738941c4b93afffc0d4f975e6a7
2024-11-15T22:40:04,352 DEBUG [RS:0;e611192d6313:42145-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/6aa73352ad577c320cd9915f5ef98912/.tmp/info/5fa0295c9e3941d09e1022f8f0d7043c as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/6aa73352ad577c320cd9915f5ef98912/info/5fa0295c9e3941d09e1022f8f0d7043c
2024-11-15T22:40:04,358 INFO [RS:0;e611192d6313:42145-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 6aa73352ad577c320cd9915f5ef98912/info of 6aa73352ad577c320cd9915f5ef98912 into 5fa0295c9e3941d09e1022f8f0d7043c(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-15T22:40:04,358 DEBUG [RS:0;e611192d6313:42145-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6aa73352ad577c320cd9915f5ef98912:
2024-11-15T22:40:04,358 INFO [RS:0;e611192d6313:42145-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912., storeName=6aa73352ad577c320cd9915f5ef98912/info, priority=15, startTime=1731710404306; duration=0sec
2024-11-15T22:40:04,358 DEBUG [RS:0;e611192d6313:42145-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-15T22:40:04,359 DEBUG [RS:0;e611192d6313:42145-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aa73352ad577c320cd9915f5ef98912:info
2024-11-15T22:40:04,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/.tmp/table/69d85c8a4a5e4dbbaeebfdb2ae16b486 is 65, key is TestLogRolling-testLogRolling/table:state/1731710389602/Put/seqid=0
2024-11-15T22:40:04,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741857_1033 (size=5340)
2024-11-15T22:40:04,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741857_1033 (size=5340)
2024-11-15T22:40:04,376 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/.tmp/table/69d85c8a4a5e4dbbaeebfdb2ae16b486
2024-11-15T22:40:04,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/.tmp/info/3138cfb07ded45ca8c2001bb4fac69e4 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/info/3138cfb07ded45ca8c2001bb4fac69e4
2024-11-15T22:40:04,387 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/info/3138cfb07ded45ca8c2001bb4fac69e4, entries=30, sequenceid=17, filesize=9.7 K
2024-11-15T22:40:04,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/.tmp/ns/04bf0738941c4b93afffc0d4f975e6a7 as 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/ns/04bf0738941c4b93afffc0d4f975e6a7 2024-11-15T22:40:04,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/ns/04bf0738941c4b93afffc0d4f975e6a7, entries=2, sequenceid=17, filesize=5.0 K 2024-11-15T22:40:04,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/.tmp/table/69d85c8a4a5e4dbbaeebfdb2ae16b486 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/table/69d85c8a4a5e4dbbaeebfdb2ae16b486 2024-11-15T22:40:04,402 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/table/69d85c8a4a5e4dbbaeebfdb2ae16b486, entries=2, sequenceid=17, filesize=5.2 K 2024-11-15T22:40:04,403 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 116ms, sequenceid=17, compaction requested=false 2024-11-15T22:40:04,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-15T22:40:05,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:05,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:05,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:40746 deadline: 1731710415568, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. 
is not online on e611192d6313,42145,1731710388091 2024-11-15T22:40:05,592 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8., hostname=e611192d6313,42145,1731710388091, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8., hostname=e611192d6313,42145,1731710388091, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. is not online on e611192d6313,42145,1731710388091 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T22:40:05,593 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8., hostname=e611192d6313,42145,1731710388091, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8. is not online on e611192d6313,42145,1731710388091 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T22:40:05,593 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731710389239.0823cc1fab0ce53b4677252ec2fad0f8., hostname=e611192d6313,42145,1731710388091, seqNum=2 from cache 2024-11-15T22:40:06,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:06,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:07,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:07,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:08,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:08,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:08,831 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,831 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,832 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,832 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:08,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:09,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:09,370 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T22:40:09,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,373 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,374 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,404 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,404 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,404 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:09,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T22:40:10,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:40:10,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:11,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:11,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:40:12,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:12,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:13,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:40:13,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:14,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:14,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:40:15,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:15,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:15,683 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c., hostname=e611192d6313,42145,1731710388091, seqNum=131] 2024-11-15T22:40:15,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:15,695 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing af7145e43a48f7b93cf9bedd838f7d2c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T22:40:15,701 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/0b15562fd44c4c36aa912c537f01a009 is 1080, key is row0097/info:/1731710415684/Put/seqid=0 2024-11-15T22:40:15,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741858_1034 (size=12516) 2024-11-15T22:40:15,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741858_1034 (size=12516) 2024-11-15T22:40:15,707 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/0b15562fd44c4c36aa912c537f01a009 2024-11-15T22:40:15,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/0b15562fd44c4c36aa912c537f01a009 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/0b15562fd44c4c36aa912c537f01a009 2024-11-15T22:40:15,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/0b15562fd44c4c36aa912c537f01a009, entries=7, sequenceid=141, filesize=12.2 K 2024-11-15T22:40:15,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for af7145e43a48f7b93cf9bedd838f7d2c in 25ms, sequenceid=141, compaction requested=false 2024-11-15T22:40:15,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:15,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:15,721 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing af7145e43a48f7b93cf9bedd838f7d2c 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-15T22:40:15,725 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/229e04f781534606a394b6ccb708632f is 1080, key is row0104/info:/1731710415696/Put/seqid=0 2024-11-15T22:40:15,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741859_1035 (size=17906) 2024-11-15T22:40:15,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741859_1035 (size=17906) 2024-11-15T22:40:15,731 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/229e04f781534606a394b6ccb708632f 2024-11-15T22:40:15,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/229e04f781534606a394b6ccb708632f as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/229e04f781534606a394b6ccb708632f 2024-11-15T22:40:15,742 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/229e04f781534606a394b6ccb708632f, entries=12, sequenceid=156, filesize=17.5 K 2024-11-15T22:40:15,743 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for af7145e43a48f7b93cf9bedd838f7d2c in 22ms, sequenceid=156, compaction requested=true 2024-11-15T22:40:15,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:15,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af7145e43a48f7b93cf9bedd838f7d2c:info, priority=-2147483648, current under compaction store size is 1 
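[Editor's note] The WARN entries that recur throughout this section ("Failed invocation for hdfs://localhost:41079/...", repeating roughly once per second per WAL file) come from the WAL close path retrying lease recovery against a DFSClient that has already been shut down, as the "Caused by: java.io.IOException: Filesystem closed" frames show. The sketch below is illustrative only, not the HBase implementation: the class and method names (PollingLeaseRecovery, pollUntilClosed) and the 1-second retry interval are assumptions for this example; the real logic lives in org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class PollingLeaseRecovery {

  /**
   * Polls FileSystem#isFileClosed(Path) through reflection (the method is not present on
   * every FileSystem implementation) until the file reports closed or the deadline passes.
   * Each failed reflective call is logged and retried, which is why the same stack trace
   * repeats about once per second in the log while the underlying DFSClient stays closed.
   */
  static boolean pollUntilClosed(FileSystem fs, Path path, long timeoutMs) throws InterruptedException {
    Method isFileClosed;
    try {
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // this FileSystem cannot report close status; a caller would fall back elsewhere
    }
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, path)) {
          return true;
        }
      } catch (InvocationTargetException | IllegalAccessException e) {
        // Mirrors the WARN above: here the wrapped cause would be
        // "java.io.IOException: Filesystem closed" because the client was shut down first.
        System.err.println("Failed invocation for " + path + ": " + e.getCause());
      }
      Thread.sleep(1000L); // assumed retry interval; the log shows ~1s between attempts
    }
    return false;
  }

  private PollingLeaseRecovery() {
  }
}
```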
2024-11-15T22:40:15,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:15,743 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T22:40:15,745 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 73406 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T22:40:15,745 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1541): af7145e43a48f7b93cf9bedd838f7d2c/info is initiating minor compaction (all files) 2024-11-15T22:40:15,745 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of af7145e43a48f7b93cf9bedd838f7d2c/info in TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. 2024-11-15T22:40:15,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:15,745 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/3f12f94d498e4cf99ad7c3d8e373bce4, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/0b15562fd44c4c36aa912c537f01a009, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/229e04f781534606a394b6ccb708632f] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp, totalSize=71.7 K 2024-11-15T22:40:15,745 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing af7145e43a48f7b93cf9bedd838f7d2c 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-15T22:40:15,745 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3f12f94d498e4cf99ad7c3d8e373bce4, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731710401492 2024-11-15T22:40:15,746 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0b15562fd44c4c36aa912c537f01a009, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1731710415684 2024-11-15T22:40:15,746 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 229e04f781534606a394b6ccb708632f, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731710415696 2024-11-15T22:40:15,749 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/dad9c980c24646b28dd0628481c18fc0 is 1080, key is row0116/info:/1731710415722/Put/seqid=0 
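[Editor's note] The flush records above follow a write-to-temp-then-commit pattern: each flushed HFile is first written under the column family's .tmp directory ("Flushed memstore ... to=.../.tmp/info/<file>") and only then moved into the live store directory ("Committing .../.tmp/info/<file> as .../info/<file>", followed by the HStore "Added ..." line). The sketch below is a minimal illustration of that two-step commit using plain Hadoop FileSystem calls; the helper name commitFlushedFile and the local paths are hypothetical stand-ins, not HBase internals or the test cluster's layout.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TmpCommitExample {

  /**
   * Moves a fully written file from a .tmp area into the live store directory. Because the
   * rename is the last step, readers only ever see completed files, which is the point of
   * the "Committing ... as ..." DEBUG lines in the log.
   */
  static Path commitFlushedFile(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path committed = new Path(storeDir, tmpFile.getName());
    if (!fs.rename(tmpFile, committed)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + committed);
    }
    return committed;
  }

  public static void main(String[] args) throws IOException {
    // Hypothetical local paths that echo the region layout seen in the log.
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path tmp = new Path("/tmp/region/.tmp/info/example-hfile");
    Path storeDir = new Path("/tmp/region/info");
    fs.mkdirs(storeDir);
    fs.mkdirs(tmp.getParent());
    fs.create(tmp, true).close();             // stand-in for the flushed HFile
    Path committed = commitFlushedFile(fs, tmp, storeDir);
    System.out.println("Added " + committed); // analogous to the HStore "Added ..." INFO line
  }

  private TmpCommitExample() {
  }
}
```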
2024-11-15T22:40:15,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741860_1036 (size=17906) 2024-11-15T22:40:15,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741860_1036 (size=17906) 2024-11-15T22:40:15,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/dad9c980c24646b28dd0628481c18fc0 2024-11-15T22:40:15,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/dad9c980c24646b28dd0628481c18fc0 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/dad9c980c24646b28dd0628481c18fc0 2024-11-15T22:40:15,762 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af7145e43a48f7b93cf9bedd838f7d2c#info#compaction#76 average throughput is 27.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T22:40:15,762 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/c71bc018e2884529aa47faa5f43f8bb3 is 1080, key is row0062/info:/1731710401492/Put/seqid=0 2024-11-15T22:40:15,765 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/dad9c980c24646b28dd0628481c18fc0, entries=12, sequenceid=171, filesize=17.5 K 2024-11-15T22:40:15,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for af7145e43a48f7b93cf9bedd838f7d2c in 21ms, sequenceid=171, compaction requested=false 2024-11-15T22:40:15,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:15,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741861_1037 (size=63636) 2024-11-15T22:40:15,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741861_1037 (size=63636) 2024-11-15T22:40:15,776 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/c71bc018e2884529aa47faa5f43f8bb3 as 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c71bc018e2884529aa47faa5f43f8bb3 2024-11-15T22:40:15,781 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in af7145e43a48f7b93cf9bedd838f7d2c/info of af7145e43a48f7b93cf9bedd838f7d2c into c71bc018e2884529aa47faa5f43f8bb3(size=62.1 K), total size for store is 79.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T22:40:15,781 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:15,781 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c., storeName=af7145e43a48f7b93cf9bedd838f7d2c/info, priority=13, startTime=1731710415743; duration=0sec 2024-11-15T22:40:15,782 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:15,782 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af7145e43a48f7b93cf9bedd838f7d2c:info 2024-11-15T22:40:16,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:16,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:17,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:17,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:17,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:17,762 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing af7145e43a48f7b93cf9bedd838f7d2c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T22:40:17,766 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/5862f3cd277a47428a64e6796768ca27 is 1080, key is row0128/info:/1731710415746/Put/seqid=0 2024-11-15T22:40:17,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741862_1038 (size=12516) 2024-11-15T22:40:17,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741862_1038 (size=12516) 2024-11-15T22:40:17,772 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=182 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/5862f3cd277a47428a64e6796768ca27 2024-11-15T22:40:17,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/5862f3cd277a47428a64e6796768ca27 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/5862f3cd277a47428a64e6796768ca27 2024-11-15T22:40:17,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/5862f3cd277a47428a64e6796768ca27, entries=7, sequenceid=182, filesize=12.2 K 2024-11-15T22:40:17,785 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9684 for af7145e43a48f7b93cf9bedd838f7d2c in 23ms, sequenceid=182, compaction requested=true 2024-11-15T22:40:17,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:17,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af7145e43a48f7b93cf9bedd838f7d2c:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T22:40:17,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:17,785 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T22:40:17,786 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94058 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T22:40:17,787 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1541): af7145e43a48f7b93cf9bedd838f7d2c/info is initiating minor compaction (all files) 2024-11-15T22:40:17,787 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of af7145e43a48f7b93cf9bedd838f7d2c/info in TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. 2024-11-15T22:40:17,787 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c71bc018e2884529aa47faa5f43f8bb3, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/dad9c980c24646b28dd0628481c18fc0, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/5862f3cd277a47428a64e6796768ca27] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp, totalSize=91.9 K 2024-11-15T22:40:17,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:17,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing af7145e43a48f7b93cf9bedd838f7d2c 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-15T22:40:17,787 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting c71bc018e2884529aa47faa5f43f8bb3, keycount=54, bloomtype=ROW, size=62.1 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731710401492 2024-11-15T22:40:17,788 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting dad9c980c24646b28dd0628481c18fc0, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1731710415722 2024-11-15T22:40:17,789 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5862f3cd277a47428a64e6796768ca27, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1731710415746 2024-11-15T22:40:17,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/f8ed5427366d450c9875d44f5c6f2ad5 is 1080, key is row0135/info:/1731710417763/Put/seqid=0 2024-11-15T22:40:17,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to 
blk_1073741863_1039 (size=15750) 2024-11-15T22:40:17,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741863_1039 (size=15750) 2024-11-15T22:40:17,797 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/f8ed5427366d450c9875d44f5c6f2ad5 2024-11-15T22:40:17,801 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af7145e43a48f7b93cf9bedd838f7d2c#info#compaction#79 average throughput is 37.45 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T22:40:17,802 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/00e6989765684c5fb4d9e8679b6b4db7 is 1080, key is row0062/info:/1731710401492/Put/seqid=0 2024-11-15T22:40:17,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/f8ed5427366d450c9875d44f5c6f2ad5 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/f8ed5427366d450c9875d44f5c6f2ad5 2024-11-15T22:40:17,810 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/f8ed5427366d450c9875d44f5c6f2ad5, entries=10, sequenceid=195, filesize=15.4 K 2024-11-15T22:40:17,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=8.41 KB/8608 for af7145e43a48f7b93cf9bedd838f7d2c in 24ms, sequenceid=195, compaction requested=false 2024-11-15T22:40:17,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:17,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:17,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing af7145e43a48f7b93cf9bedd838f7d2c 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-15T22:40:17,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741864_1040 (size=84293) 2024-11-15T22:40:17,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741864_1040 (size=84293) 2024-11-15T22:40:17,818 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/0f4ec63538774742af777c5354aa8515 is 1080, key is row0145/info:/1731710417789/Put/seqid=0 2024-11-15T22:40:17,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741865_1041 (size=15750) 2024-11-15T22:40:17,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741865_1041 (size=15750) 2024-11-15T22:40:17,823 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/0f4ec63538774742af777c5354aa8515 2024-11-15T22:40:17,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/0f4ec63538774742af777c5354aa8515 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/0f4ec63538774742af777c5354aa8515 2024-11-15T22:40:17,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/0f4ec63538774742af777c5354aa8515, entries=10, sequenceid=208, filesize=15.4 K 2024-11-15T22:40:17,835 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=6.30 KB/6456 for af7145e43a48f7b93cf9bedd838f7d2c in 20ms, sequenceid=208, compaction requested=false 2024-11-15T22:40:17,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:17,888 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T22:40:18,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:18,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:18,226 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/00e6989765684c5fb4d9e8679b6b4db7 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/00e6989765684c5fb4d9e8679b6b4db7 2024-11-15T22:40:18,233 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in af7145e43a48f7b93cf9bedd838f7d2c/info of af7145e43a48f7b93cf9bedd838f7d2c into 00e6989765684c5fb4d9e8679b6b4db7(size=82.3 K), total size for store is 113.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T22:40:18,233 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:18,233 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c., storeName=af7145e43a48f7b93cf9bedd838f7d2c/info, priority=13, startTime=1731710417785; duration=0sec 2024-11-15T22:40:18,233 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:18,233 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af7145e43a48f7b93cf9bedd838f7d2c:info 2024-11-15T22:40:19,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:19,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:40:19,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:19,834 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing af7145e43a48f7b93cf9bedd838f7d2c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T22:40:19,840 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/c75beb58ccb049728b9710e499e75a0e is 1080, key is row0155/info:/1731710417816/Put/seqid=0 2024-11-15T22:40:19,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741866_1042 (size=12516) 2024-11-15T22:40:19,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741866_1042 (size=12516) 2024-11-15T22:40:19,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/c75beb58ccb049728b9710e499e75a0e 2024-11-15T22:40:19,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/c75beb58ccb049728b9710e499e75a0e as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c75beb58ccb049728b9710e499e75a0e 2024-11-15T22:40:19,857 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c75beb58ccb049728b9710e499e75a0e, entries=7, sequenceid=219, filesize=12.2 K 2024-11-15T22:40:19,858 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for af7145e43a48f7b93cf9bedd838f7d2c in 25ms, sequenceid=219, compaction requested=true 2024-11-15T22:40:19,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:19,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af7145e43a48f7b93cf9bedd838f7d2c:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T22:40:19,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:19,859 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-15T22:40:19,860 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 128309 starting at candidate #0 after considering 3 
permutations with 3 in ratio 2024-11-15T22:40:19,860 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1541): af7145e43a48f7b93cf9bedd838f7d2c/info is initiating minor compaction (all files) 2024-11-15T22:40:19,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:19,860 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of af7145e43a48f7b93cf9bedd838f7d2c/info in TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. 2024-11-15T22:40:19,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing af7145e43a48f7b93cf9bedd838f7d2c 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-15T22:40:19,860 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/00e6989765684c5fb4d9e8679b6b4db7, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/f8ed5427366d450c9875d44f5c6f2ad5, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/0f4ec63538774742af777c5354aa8515, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c75beb58ccb049728b9710e499e75a0e] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp, totalSize=125.3 K 2024-11-15T22:40:19,861 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 00e6989765684c5fb4d9e8679b6b4db7, keycount=73, bloomtype=ROW, size=82.3 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1731710401492 2024-11-15T22:40:19,861 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting f8ed5427366d450c9875d44f5c6f2ad5, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1731710417763 2024-11-15T22:40:19,861 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0f4ec63538774742af777c5354aa8515, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1731710417789 2024-11-15T22:40:19,862 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting c75beb58ccb049728b9710e499e75a0e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1731710417816 2024-11-15T22:40:19,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/c43015f5c4ea421e9a714ccba7cff7b0 is 1080, key is row0162/info:/1731710419835/Put/seqid=0 2024-11-15T22:40:19,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741867_1043 (size=17906) 
2024-11-15T22:40:19,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741867_1043 (size=17906) 2024-11-15T22:40:19,869 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/c43015f5c4ea421e9a714ccba7cff7b0 2024-11-15T22:40:19,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/c43015f5c4ea421e9a714ccba7cff7b0 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c43015f5c4ea421e9a714ccba7cff7b0 2024-11-15T22:40:19,876 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af7145e43a48f7b93cf9bedd838f7d2c#info#compaction#83 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T22:40:19,877 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/8a0792b9aaec4d1eb857e514d4e47b5f is 1080, key is row0062/info:/1731710401492/Put/seqid=0 2024-11-15T22:40:19,881 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c43015f5c4ea421e9a714ccba7cff7b0, entries=12, sequenceid=234, filesize=17.5 K 2024-11-15T22:40:19,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741868_1044 (size=113543) 2024-11-15T22:40:19,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741868_1044 (size=113543) 2024-11-15T22:40:19,882 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=9.46 KB/9684 for af7145e43a48f7b93cf9bedd838f7d2c in 22ms, sequenceid=234, compaction requested=false 2024-11-15T22:40:19,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:19,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:19,884 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing af7145e43a48f7b93cf9bedd838f7d2c 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-15T22:40:19,888 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/26b38eb1cde5402b8d3d3f96804fe7ce is 1080, key is 
row0174/info:/1731710419861/Put/seqid=0 2024-11-15T22:40:19,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741869_1045 (size=16828) 2024-11-15T22:40:19,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741869_1045 (size=16828) 2024-11-15T22:40:19,893 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/26b38eb1cde5402b8d3d3f96804fe7ce 2024-11-15T22:40:19,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/26b38eb1cde5402b8d3d3f96804fe7ce as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/26b38eb1cde5402b8d3d3f96804fe7ce 2024-11-15T22:40:19,903 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/26b38eb1cde5402b8d3d3f96804fe7ce, entries=11, sequenceid=248, filesize=16.4 K 2024-11-15T22:40:19,904 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=8.41 KB/8608 for af7145e43a48f7b93cf9bedd838f7d2c in 20ms, sequenceid=248, compaction requested=false 2024-11-15T22:40:19,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:20,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:20,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:40:20,296 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/8a0792b9aaec4d1eb857e514d4e47b5f as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/8a0792b9aaec4d1eb857e514d4e47b5f 2024-11-15T22:40:20,303 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in af7145e43a48f7b93cf9bedd838f7d2c/info of af7145e43a48f7b93cf9bedd838f7d2c into 8a0792b9aaec4d1eb857e514d4e47b5f(size=110.9 K), total size for store is 144.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T22:40:20,303 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:20,303 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c., storeName=af7145e43a48f7b93cf9bedd838f7d2c/info, priority=12, startTime=1731710419858; duration=0sec 2024-11-15T22:40:20,304 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:20,304 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af7145e43a48f7b93cf9bedd838f7d2c:info 2024-11-15T22:40:21,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:21,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T22:40:21,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:21,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing af7145e43a48f7b93cf9bedd838f7d2c 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-15T22:40:21,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/113108c6a7bb44be82c7040250a9e59f is 1080, key is row0185/info:/1731710419885/Put/seqid=0 2024-11-15T22:40:21,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741870_1046 (size=14675) 2024-11-15T22:40:21,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741870_1046 (size=14675) 2024-11-15T22:40:21,920 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/113108c6a7bb44be82c7040250a9e59f 2024-11-15T22:40:21,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/113108c6a7bb44be82c7040250a9e59f as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/113108c6a7bb44be82c7040250a9e59f 2024-11-15T22:40:21,931 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/113108c6a7bb44be82c7040250a9e59f, entries=9, sequenceid=261, filesize=14.3 K 2024-11-15T22:40:21,932 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=9.46 KB/9684 for af7145e43a48f7b93cf9bedd838f7d2c in 26ms, sequenceid=261, compaction requested=true 2024-11-15T22:40:21,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:21,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af7145e43a48f7b93cf9bedd838f7d2c:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T22:40:21,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:21,932 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-15T22:40:21,934 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 162952 starting at candidate #0 after considering 
3 permutations with 3 in ratio 2024-11-15T22:40:21,934 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1541): af7145e43a48f7b93cf9bedd838f7d2c/info is initiating minor compaction (all files) 2024-11-15T22:40:21,934 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of af7145e43a48f7b93cf9bedd838f7d2c/info in TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. 2024-11-15T22:40:21,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:21,934 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/8a0792b9aaec4d1eb857e514d4e47b5f, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c43015f5c4ea421e9a714ccba7cff7b0, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/26b38eb1cde5402b8d3d3f96804fe7ce, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/113108c6a7bb44be82c7040250a9e59f] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp, totalSize=159.1 K 2024-11-15T22:40:21,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing af7145e43a48f7b93cf9bedd838f7d2c 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-15T22:40:21,935 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8a0792b9aaec4d1eb857e514d4e47b5f, keycount=100, bloomtype=ROW, size=110.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1731710401492 2024-11-15T22:40:21,935 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting c43015f5c4ea421e9a714ccba7cff7b0, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1731710419835 2024-11-15T22:40:21,935 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 26b38eb1cde5402b8d3d3f96804fe7ce, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1731710419861 2024-11-15T22:40:21,936 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 113108c6a7bb44be82c7040250a9e59f, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1731710419885 2024-11-15T22:40:21,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/03668ba5f0da4489bf026200130d3c4c is 1080, key is row0194/info:/1731710421910/Put/seqid=0 2024-11-15T22:40:21,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741871_1047 (size=16839) 
2024-11-15T22:40:21,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741871_1047 (size=16839) 2024-11-15T22:40:21,944 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/03668ba5f0da4489bf026200130d3c4c 2024-11-15T22:40:21,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/03668ba5f0da4489bf026200130d3c4c as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/03668ba5f0da4489bf026200130d3c4c 2024-11-15T22:40:21,950 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af7145e43a48f7b93cf9bedd838f7d2c#info#compaction#87 average throughput is 45.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T22:40:21,951 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/04d44eb00083459fb27930bfa8a45709 is 1080, key is row0062/info:/1731710401492/Put/seqid=0 2024-11-15T22:40:21,956 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/03668ba5f0da4489bf026200130d3c4c, entries=11, sequenceid=275, filesize=16.4 K 2024-11-15T22:40:21,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for af7145e43a48f7b93cf9bedd838f7d2c in 22ms, sequenceid=275, compaction requested=false 2024-11-15T22:40:21,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:21,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741872_1048 (size=148383) 2024-11-15T22:40:21,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741872_1048 (size=148383) 2024-11-15T22:40:21,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:21,959 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing af7145e43a48f7b93cf9bedd838f7d2c 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-15T22:40:21,963 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/d6b55e1b84bc4f2f996283070fce5fee is 1080, key is 
row0205/info:/1731710421935/Put/seqid=0 2024-11-15T22:40:21,966 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/04d44eb00083459fb27930bfa8a45709 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/04d44eb00083459fb27930bfa8a45709 2024-11-15T22:40:21,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741873_1049 (size=16839) 2024-11-15T22:40:21,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741873_1049 (size=16839) 2024-11-15T22:40:21,968 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/d6b55e1b84bc4f2f996283070fce5fee 2024-11-15T22:40:21,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/d6b55e1b84bc4f2f996283070fce5fee as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/d6b55e1b84bc4f2f996283070fce5fee 2024-11-15T22:40:21,973 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in af7145e43a48f7b93cf9bedd838f7d2c/info of af7145e43a48f7b93cf9bedd838f7d2c into 04d44eb00083459fb27930bfa8a45709(size=144.9 K), total size for store is 161.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-15T22:40:21,973 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:21,973 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c., storeName=af7145e43a48f7b93cf9bedd838f7d2c/info, priority=12, startTime=1731710421932; duration=0sec 2024-11-15T22:40:21,973 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:21,973 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af7145e43a48f7b93cf9bedd838f7d2c:info 2024-11-15T22:40:21,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/d6b55e1b84bc4f2f996283070fce5fee, entries=11, sequenceid=289, filesize=16.4 K 2024-11-15T22:40:21,979 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for af7145e43a48f7b93cf9bedd838f7d2c in 20ms, sequenceid=289, compaction requested=true 2024-11-15T22:40:21,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:21,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af7145e43a48f7b93cf9bedd838f7d2c:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T22:40:21,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:21,979 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T22:40:21,980 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 182061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T22:40:21,980 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1541): af7145e43a48f7b93cf9bedd838f7d2c/info is initiating minor compaction (all files) 2024-11-15T22:40:21,980 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of af7145e43a48f7b93cf9bedd838f7d2c/info in TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. 
2024-11-15T22:40:21,980 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/04d44eb00083459fb27930bfa8a45709, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/03668ba5f0da4489bf026200130d3c4c, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/d6b55e1b84bc4f2f996283070fce5fee] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp, totalSize=177.8 K 2024-11-15T22:40:21,980 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 04d44eb00083459fb27930bfa8a45709, keycount=132, bloomtype=ROW, size=144.9 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1731710401492 2024-11-15T22:40:21,981 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 03668ba5f0da4489bf026200130d3c4c, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1731710421910 2024-11-15T22:40:21,981 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting d6b55e1b84bc4f2f996283070fce5fee, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1731710421935 2024-11-15T22:40:21,991 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af7145e43a48f7b93cf9bedd838f7d2c#info#compaction#89 average throughput is 79.01 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T22:40:21,992 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/bf10513e7a0344f8a9f5e269ae2687b1 is 1080, key is row0062/info:/1731710401492/Put/seqid=0 2024-11-15T22:40:21,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741874_1050 (size=172231) 2024-11-15T22:40:21,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741874_1050 (size=172231) 2024-11-15T22:40:22,000 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/bf10513e7a0344f8a9f5e269ae2687b1 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/bf10513e7a0344f8a9f5e269ae2687b1 2024-11-15T22:40:22,007 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in af7145e43a48f7b93cf9bedd838f7d2c/info of af7145e43a48f7b93cf9bedd838f7d2c into bf10513e7a0344f8a9f5e269ae2687b1(size=168.2 K), total size for store is 168.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T22:40:22,007 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:22,007 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c., storeName=af7145e43a48f7b93cf9bedd838f7d2c/info, priority=13, startTime=1731710421979; duration=0sec 2024-11-15T22:40:22,007 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:22,007 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af7145e43a48f7b93cf9bedd838f7d2c:info 2024-11-15T22:40:22,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:22,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:23,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:23,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:23,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:23,983 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing af7145e43a48f7b93cf9bedd838f7d2c 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-15T22:40:23,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/20a2e53411ac4b17b862f5aa192abc95 is 1080, key is row0216/info:/1731710421960/Put/seqid=0 2024-11-15T22:40:23,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741875_1051 (size=15760) 2024-11-15T22:40:23,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741875_1051 (size=15760) 2024-11-15T22:40:23,997 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/20a2e53411ac4b17b862f5aa192abc95 2024-11-15T22:40:24,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/20a2e53411ac4b17b862f5aa192abc95 as 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/20a2e53411ac4b17b862f5aa192abc95 2024-11-15T22:40:24,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/20a2e53411ac4b17b862f5aa192abc95, entries=10, sequenceid=304, filesize=15.4 K 2024-11-15T22:40:24,009 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=11.56 KB/11836 for af7145e43a48f7b93cf9bedd838f7d2c in 25ms, sequenceid=304, compaction requested=false 2024-11-15T22:40:24,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:24,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:24,009 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing af7145e43a48f7b93cf9bedd838f7d2c 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-15T22:40:24,014 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/5d0a1976108d4586946d4812973f8f1f is 1080, key is row0226/info:/1731710423985/Put/seqid=0 2024-11-15T22:40:24,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741876_1052 (size=17918) 2024-11-15T22:40:24,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741876_1052 (size=17918) 2024-11-15T22:40:24,020 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/5d0a1976108d4586946d4812973f8f1f 2024-11-15T22:40:24,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/5d0a1976108d4586946d4812973f8f1f as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/5d0a1976108d4586946d4812973f8f1f 2024-11-15T22:40:24,030 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/5d0a1976108d4586946d4812973f8f1f, entries=12, sequenceid=319, filesize=17.5 K 2024-11-15T22:40:24,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for af7145e43a48f7b93cf9bedd838f7d2c in 22ms, sequenceid=319, compaction requested=true 2024-11-15T22:40:24,031 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:24,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af7145e43a48f7b93cf9bedd838f7d2c:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T22:40:24,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:24,031 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T22:40:24,032 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 205909 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T22:40:24,032 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1541): af7145e43a48f7b93cf9bedd838f7d2c/info is initiating minor compaction (all files) 2024-11-15T22:40:24,032 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of af7145e43a48f7b93cf9bedd838f7d2c/info in TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. 2024-11-15T22:40:24,032 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/bf10513e7a0344f8a9f5e269ae2687b1, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/20a2e53411ac4b17b862f5aa192abc95, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/5d0a1976108d4586946d4812973f8f1f] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp, totalSize=201.1 K 2024-11-15T22:40:24,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42145 {}] regionserver.HRegion(8855): Flush requested on af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:24,033 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing af7145e43a48f7b93cf9bedd838f7d2c 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-15T22:40:24,033 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting bf10513e7a0344f8a9f5e269ae2687b1, keycount=154, bloomtype=ROW, size=168.2 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1731710401492 2024-11-15T22:40:24,033 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 20a2e53411ac4b17b862f5aa192abc95, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1731710421960 2024-11-15T22:40:24,033 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5d0a1976108d4586946d4812973f8f1f, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1731710423985 2024-11-15T22:40:24,036 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/1df213e5025d4943a49779aae9452372 is 1080, key is row0238/info:/1731710424010/Put/seqid=0 2024-11-15T22:40:24,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741877_1053 (size=17918) 2024-11-15T22:40:24,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741877_1053 (size=17918) 2024-11-15T22:40:24,042 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/1df213e5025d4943a49779aae9452372 2024-11-15T22:40:24,046 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af7145e43a48f7b93cf9bedd838f7d2c#info#compaction#93 average throughput is 60.20 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T22:40:24,046 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/192e20003f584bb68808c70b0182f1b9 is 1080, key is row0062/info:/1731710401492/Put/seqid=0 2024-11-15T22:40:24,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741878_1054 (size=196075) 2024-11-15T22:40:24,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741878_1054 (size=196075) 2024-11-15T22:40:24,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/1df213e5025d4943a49779aae9452372 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/1df213e5025d4943a49779aae9452372 2024-11-15T22:40:24,056 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/1df213e5025d4943a49779aae9452372, entries=12, sequenceid=334, filesize=17.5 K 2024-11-15T22:40:24,056 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/192e20003f584bb68808c70b0182f1b9 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/192e20003f584bb68808c70b0182f1b9 2024-11-15T22:40:24,057 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=7.36 KB/7532 for af7145e43a48f7b93cf9bedd838f7d2c in 24ms, sequenceid=334, compaction requested=false 2024-11-15T22:40:24,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:24,061 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in af7145e43a48f7b93cf9bedd838f7d2c/info of af7145e43a48f7b93cf9bedd838f7d2c into 192e20003f584bb68808c70b0182f1b9(size=191.5 K), total size for store is 209.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T22:40:24,061 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:24,061 INFO [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c., storeName=af7145e43a48f7b93cf9bedd838f7d2c/info, priority=13, startTime=1731710424031; duration=0sec 2024-11-15T22:40:24,061 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T22:40:24,061 DEBUG [RS:0;e611192d6313:42145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af7145e43a48f7b93cf9bedd838f7d2c:info 2024-11-15T22:40:24,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:24,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:25,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:25,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:26,048 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-15T22:40:26,048 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C42145%2C1731710388091.1731710426048 2024-11-15T22:40:26,058 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,058 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,058 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,058 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,058 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,059 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/WALs/e611192d6313,42145,1731710388091/e611192d6313%2C42145%2C1731710388091.1731710388713 with entries=319, filesize=310.64 KB; new WAL /user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/WALs/e611192d6313,42145,1731710388091/e611192d6313%2C42145%2C1731710388091.1731710426048 2024-11-15T22:40:26,060 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37289:37289),(127.0.0.1/127.0.0.1:37107:37107)] 2024-11-15T22:40:26,060 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/WALs/e611192d6313,42145,1731710388091/e611192d6313%2C42145%2C1731710388091.1731710388713 is not closed yet, will try archiving it next time 2024-11-15T22:40:26,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741833_1009 (size=318107) 2024-11-15T22:40:26,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741833_1009 (size=318107) 2024-11-15T22:40:26,090 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing af7145e43a48f7b93cf9bedd838f7d2c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T22:40:26,094 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/22df18f9dfa54e668cbfdf53f696dd0e is 1080, key is row0250/info:/1731710424034/Put/seqid=0 2024-11-15T22:40:26,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741880_1056 (size=12523) 2024-11-15T22:40:26,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741880_1056 (size=12523) 2024-11-15T22:40:26,101 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=345 (bloomFilter=true), 
to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/22df18f9dfa54e668cbfdf53f696dd0e 2024-11-15T22:40:26,109 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/.tmp/info/22df18f9dfa54e668cbfdf53f696dd0e as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/22df18f9dfa54e668cbfdf53f696dd0e 2024-11-15T22:40:26,114 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/22df18f9dfa54e668cbfdf53f696dd0e, entries=7, sequenceid=345, filesize=12.2 K 2024-11-15T22:40:26,115 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for af7145e43a48f7b93cf9bedd838f7d2c in 26ms, sequenceid=345, compaction requested=true 2024-11-15T22:40:26,115 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for af7145e43a48f7b93cf9bedd838f7d2c: 2024-11-15T22:40:26,115 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-15T22:40:26,120 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/.tmp/info/1ec2e387bd334f9b8a04ed2f322f6fb1 is 186, key is TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912./info:regioninfo/1731710404309/Put/seqid=0 2024-11-15T22:40:26,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741881_1057 (size=6153) 2024-11-15T22:40:26,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741881_1057 (size=6153) 2024-11-15T22:40:26,126 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/.tmp/info/1ec2e387bd334f9b8a04ed2f322f6fb1 2024-11-15T22:40:26,131 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/.tmp/info/1ec2e387bd334f9b8a04ed2f322f6fb1 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/info/1ec2e387bd334f9b8a04ed2f322f6fb1 2024-11-15T22:40:26,135 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/info/1ec2e387bd334f9b8a04ed2f322f6fb1, entries=5, sequenceid=21, filesize=6.0 K 2024-11-15T22:40:26,136 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 21ms, 
sequenceid=21, compaction requested=false 2024-11-15T22:40:26,136 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-15T22:40:26,136 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 6aa73352ad577c320cd9915f5ef98912: 2024-11-15T22:40:26,136 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C42145%2C1731710388091.1731710426136 2024-11-15T22:40:26,141 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,141 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,141 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,141 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,141 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,141 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/WALs/e611192d6313,42145,1731710388091/e611192d6313%2C42145%2C1731710388091.1731710426048 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/WALs/e611192d6313,42145,1731710388091/e611192d6313%2C42145%2C1731710388091.1731710426136 2024-11-15T22:40:26,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741879_1055 (size=731) 2024-11-15T22:40:26,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741879_1055 (size=731) 2024-11-15T22:40:26,149 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37107:37107),(127.0.0.1/127.0.0.1:37289:37289)] 2024-11-15T22:40:26,149 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/WALs/e611192d6313,42145,1731710388091/e611192d6313%2C42145%2C1731710388091.1731710388713 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/oldWALs/e611192d6313%2C42145%2C1731710388091.1731710388713 2024-11-15T22:40:26,149 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T22:40:26,150 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T22:40:26,150 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T22:40:26,150 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:40:26,150 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:40:26,150 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:40:26,150 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T22:40:26,150 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T22:40:26,150 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1409958525, stopped=false 2024-11-15T22:40:26,150 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e611192d6313,45633,1731710387911 2024-11-15T22:40:26,150 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/WALs/e611192d6313,42145,1731710388091/e611192d6313%2C42145%2C1731710388091.1731710426048 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/oldWALs/e611192d6313%2C42145%2C1731710388091.1731710426048 2024-11-15T22:40:26,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T22:40:26,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T22:40:26,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:26,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:26,208 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T22:40:26,208 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T22:40:26,209 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:40:26,209 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:40:26,209 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:40:26,209 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e611192d6313,42145,1731710388091' ***** 2024-11-15T22:40:26,209 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T22:40:26,209 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:40:26,210 INFO [RS:0;e611192d6313:42145 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T22:40:26,210 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T22:40:26,211 INFO [RS:0;e611192d6313:42145 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T22:40:26,211 INFO [RS:0;e611192d6313:42145 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T22:40:26,211 INFO [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(3091): Received CLOSE for af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:26,211 INFO [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(3091): Received CLOSE for 6aa73352ad577c320cd9915f5ef98912 2024-11-15T22:40:26,211 INFO [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(959): stopping server e611192d6313,42145,1731710388091 2024-11-15T22:40:26,211 INFO [RS:0;e611192d6313:42145 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T22:40:26,211 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing af7145e43a48f7b93cf9bedd838f7d2c, disabling compactions & flushes 2024-11-15T22:40:26,211 INFO [RS:0;e611192d6313:42145 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e611192d6313:42145. 2024-11-15T22:40:26,211 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. 
2024-11-15T22:40:26,211 DEBUG [RS:0;e611192d6313:42145 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:40:26,211 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. 2024-11-15T22:40:26,212 DEBUG [RS:0;e611192d6313:42145 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:40:26,212 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. after waiting 0 ms 2024-11-15T22:40:26,212 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. 2024-11-15T22:40:26,212 INFO [RS:0;e611192d6313:42145 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T22:40:26,212 INFO [RS:0;e611192d6313:42145 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T22:40:26,212 INFO [RS:0;e611192d6313:42145 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-15T22:40:26,212 INFO [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T22:40:26,212 INFO [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-15T22:40:26,212 DEBUG [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(1325): Online Regions={af7145e43a48f7b93cf9bedd838f7d2c=TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c., 1588230740=hbase:meta,,1.1588230740, 6aa73352ad577c320cd9915f5ef98912=TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912.} 2024-11-15T22:40:26,212 DEBUG [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6aa73352ad577c320cd9915f5ef98912, af7145e43a48f7b93cf9bedd838f7d2c 2024-11-15T22:40:26,213 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T22:40:26,213 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T22:40:26,213 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T22:40:26,213 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T22:40:26,213 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T22:40:26,213 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa.0823cc1fab0ce53b4677252ec2fad0f8->hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa-top, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-3ac28241d6964bac9680183ab346c57e, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/3f12f94d498e4cf99ad7c3d8e373bce4, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-bb4306ce97c54449b05507362ffabd06, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/0b15562fd44c4c36aa912c537f01a009, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c71bc018e2884529aa47faa5f43f8bb3, 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/229e04f781534606a394b6ccb708632f, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/dad9c980c24646b28dd0628481c18fc0, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/00e6989765684c5fb4d9e8679b6b4db7, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/5862f3cd277a47428a64e6796768ca27, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/f8ed5427366d450c9875d44f5c6f2ad5, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/0f4ec63538774742af777c5354aa8515, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/8a0792b9aaec4d1eb857e514d4e47b5f, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c75beb58ccb049728b9710e499e75a0e, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c43015f5c4ea421e9a714ccba7cff7b0, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/26b38eb1cde5402b8d3d3f96804fe7ce, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/04d44eb00083459fb27930bfa8a45709, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/113108c6a7bb44be82c7040250a9e59f, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/03668ba5f0da4489bf026200130d3c4c, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/bf10513e7a0344f8a9f5e269ae2687b1, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/d6b55e1b84bc4f2f996283070fce5fee, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/20a2e53411ac4b17b862f5aa192abc95, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/5d0a1976108d4586946d4812973f8f1f] to archive 2024-11-15T22:40:26,215 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(360): Archiving 
compacted files. 2024-11-15T22:40:26,218 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa.0823cc1fab0ce53b4677252ec2fad0f8 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa.0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:26,219 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-15T22:40:26,220 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-3ac28241d6964bac9680183ab346c57e to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-3ac28241d6964bac9680183ab346c57e 2024-11-15T22:40:26,220 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T22:40:26,220 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T22:40:26,220 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731710426212Running coprocessor pre-close hooks at 1731710426212Disabling compacts and flushes for region at 1731710426212Disabling writes for close at 1731710426213 (+1 ms)Writing region close event to WAL at 1731710426215 (+2 ms)Running coprocessor post-close hooks at 1731710426220 (+5 ms)Closed at 1731710426220 2024-11-15T22:40:26,220 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T22:40:26,221 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/3f12f94d498e4cf99ad7c3d8e373bce4 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/3f12f94d498e4cf99ad7c3d8e373bce4 2024-11-15T22:40:26,222 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from 
FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-bb4306ce97c54449b05507362ffabd06 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/TestLogRolling-testLogRolling=0823cc1fab0ce53b4677252ec2fad0f8-bb4306ce97c54449b05507362ffabd06 2024-11-15T22:40:26,223 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/0b15562fd44c4c36aa912c537f01a009 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/0b15562fd44c4c36aa912c537f01a009 2024-11-15T22:40:26,225 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c71bc018e2884529aa47faa5f43f8bb3 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c71bc018e2884529aa47faa5f43f8bb3 2024-11-15T22:40:26,226 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/229e04f781534606a394b6ccb708632f to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/229e04f781534606a394b6ccb708632f 2024-11-15T22:40:26,227 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/dad9c980c24646b28dd0628481c18fc0 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/dad9c980c24646b28dd0628481c18fc0 2024-11-15T22:40:26,228 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/00e6989765684c5fb4d9e8679b6b4db7 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/00e6989765684c5fb4d9e8679b6b4db7 
2024-11-15T22:40:26,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:26,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:26,229 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/5862f3cd277a47428a64e6796768ca27 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/5862f3cd277a47428a64e6796768ca27 2024-11-15T22:40:26,231 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/f8ed5427366d450c9875d44f5c6f2ad5 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/f8ed5427366d450c9875d44f5c6f2ad5 2024-11-15T22:40:26,232 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/0f4ec63538774742af777c5354aa8515 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/0f4ec63538774742af777c5354aa8515 2024-11-15T22:40:26,233 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/8a0792b9aaec4d1eb857e514d4e47b5f to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/8a0792b9aaec4d1eb857e514d4e47b5f 2024-11-15T22:40:26,234 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c75beb58ccb049728b9710e499e75a0e to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c75beb58ccb049728b9710e499e75a0e 2024-11-15T22:40:26,235 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c43015f5c4ea421e9a714ccba7cff7b0 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/c43015f5c4ea421e9a714ccba7cff7b0 2024-11-15T22:40:26,237 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/26b38eb1cde5402b8d3d3f96804fe7ce to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/26b38eb1cde5402b8d3d3f96804fe7ce 2024-11-15T22:40:26,238 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/04d44eb00083459fb27930bfa8a45709 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/04d44eb00083459fb27930bfa8a45709 2024-11-15T22:40:26,239 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/113108c6a7bb44be82c7040250a9e59f to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/113108c6a7bb44be82c7040250a9e59f 2024-11-15T22:40:26,240 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/03668ba5f0da4489bf026200130d3c4c to 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/03668ba5f0da4489bf026200130d3c4c 2024-11-15T22:40:26,241 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/bf10513e7a0344f8a9f5e269ae2687b1 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/bf10513e7a0344f8a9f5e269ae2687b1 2024-11-15T22:40:26,242 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/d6b55e1b84bc4f2f996283070fce5fee to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/d6b55e1b84bc4f2f996283070fce5fee 2024-11-15T22:40:26,244 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/20a2e53411ac4b17b862f5aa192abc95 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/20a2e53411ac4b17b862f5aa192abc95 2024-11-15T22:40:26,245 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/5d0a1976108d4586946d4812973f8f1f to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/info/5d0a1976108d4586946d4812973f8f1f 2024-11-15T22:40:26,246 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e611192d6313:45633 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-15T22:40:26,246 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [3f12f94d498e4cf99ad7c3d8e373bce4=42984, 0b15562fd44c4c36aa912c537f01a009=12516, c71bc018e2884529aa47faa5f43f8bb3=63636, 229e04f781534606a394b6ccb708632f=17906, dad9c980c24646b28dd0628481c18fc0=17906, 00e6989765684c5fb4d9e8679b6b4db7=84293, 5862f3cd277a47428a64e6796768ca27=12516, f8ed5427366d450c9875d44f5c6f2ad5=15750, 0f4ec63538774742af777c5354aa8515=15750, 8a0792b9aaec4d1eb857e514d4e47b5f=113543, c75beb58ccb049728b9710e499e75a0e=12516, c43015f5c4ea421e9a714ccba7cff7b0=17906, 26b38eb1cde5402b8d3d3f96804fe7ce=16828, 04d44eb00083459fb27930bfa8a45709=148383, 113108c6a7bb44be82c7040250a9e59f=14675, 03668ba5f0da4489bf026200130d3c4c=16839, bf10513e7a0344f8a9f5e269ae2687b1=172231, d6b55e1b84bc4f2f996283070fce5fee=16839, 20a2e53411ac4b17b862f5aa192abc95=15760, 5d0a1976108d4586946d4812973f8f1f=17918] 2024-11-15T22:40:26,250 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/af7145e43a48f7b93cf9bedd838f7d2c/recovered.edits/348.seqid, newMaxSeqId=348, maxSeqId=130 2024-11-15T22:40:26,251 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. 2024-11-15T22:40:26,251 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for af7145e43a48f7b93cf9bedd838f7d2c: Waiting for close lock at 1731710426211Running coprocessor pre-close hooks at 1731710426211Disabling compacts and flushes for region at 1731710426211Disabling writes for close at 1731710426212 (+1 ms)Writing region close event to WAL at 1731710426246 (+34 ms)Running coprocessor post-close hooks at 1731710426250 (+4 ms)Closed at 1731710426251 (+1 ms) 2024-11-15T22:40:26,251 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731710403600.af7145e43a48f7b93cf9bedd838f7d2c. 2024-11-15T22:40:26,251 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6aa73352ad577c320cd9915f5ef98912, disabling compactions & flushes 2024-11-15T22:40:26,251 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912. 2024-11-15T22:40:26,251 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912. 2024-11-15T22:40:26,251 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912. 
after waiting 0 ms 2024-11-15T22:40:26,251 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912. 2024-11-15T22:40:26,251 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/6aa73352ad577c320cd9915f5ef98912/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa.0823cc1fab0ce53b4677252ec2fad0f8->hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/0823cc1fab0ce53b4677252ec2fad0f8/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa-bottom] to archive 2024-11-15T22:40:26,252 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-15T22:40:26,254 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/6aa73352ad577c320cd9915f5ef98912/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa.0823cc1fab0ce53b4677252ec2fad0f8 to hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/archive/data/default/TestLogRolling-testLogRolling/6aa73352ad577c320cd9915f5ef98912/info/c5fbfa3bd0a9472cac09bbc8fe4d4daa.0823cc1fab0ce53b4677252ec2fad0f8 2024-11-15T22:40:26,254 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-15T22:40:26,258 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/data/default/TestLogRolling-testLogRolling/6aa73352ad577c320cd9915f5ef98912/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-11-15T22:40:26,258 INFO [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912. 2024-11-15T22:40:26,258 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6aa73352ad577c320cd9915f5ef98912: Waiting for close lock at 1731710426251Running coprocessor pre-close hooks at 1731710426251Disabling compacts and flushes for region at 1731710426251Disabling writes for close at 1731710426251Writing region close event to WAL at 1731710426254 (+3 ms)Running coprocessor post-close hooks at 1731710426258 (+4 ms)Closed at 1731710426258 2024-11-15T22:40:26,258 DEBUG [RS_CLOSE_REGION-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731710403600.6aa73352ad577c320cd9915f5ef98912. 2024-11-15T22:40:26,413 INFO [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(976): stopping server e611192d6313,42145,1731710388091; all regions closed. 
2024-11-15T22:40:26,413 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,413 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,413 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,414 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,414 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741834_1010 (size=8107) 2024-11-15T22:40:26,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741834_1010 (size=8107) 2024-11-15T22:40:26,420 DEBUG [RS:0;e611192d6313:42145 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/oldWALs 2024-11-15T22:40:26,420 INFO [RS:0;e611192d6313:42145 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e611192d6313%2C42145%2C1731710388091.meta:.meta(num 1731710389054) 2024-11-15T22:40:26,421 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,421 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,421 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,421 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,421 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741882_1058 (size=780) 2024-11-15T22:40:26,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741882_1058 (size=780) 2024-11-15T22:40:26,426 DEBUG [RS:0;e611192d6313:42145 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/oldWALs 2024-11-15T22:40:26,426 INFO [RS:0;e611192d6313:42145 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e611192d6313%2C42145%2C1731710388091:(num 1731710426136) 2024-11-15T22:40:26,426 DEBUG [RS:0;e611192d6313:42145 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:40:26,426 INFO [RS:0;e611192d6313:42145 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T22:40:26,426 INFO [RS:0;e611192d6313:42145 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T22:40:26,426 INFO [RS:0;e611192d6313:42145 {}] hbase.ChoreService(370): Chore service for: regionserver/e611192d6313:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T22:40:26,426 INFO [RS:0;e611192d6313:42145 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T22:40:26,426 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T22:40:26,426 INFO [RS:0;e611192d6313:42145 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42145 2024-11-15T22:40:26,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T22:40:26,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e611192d6313,42145,1731710388091 2024-11-15T22:40:26,439 INFO [RS:0;e611192d6313:42145 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T22:40:26,450 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e611192d6313,42145,1731710388091] 2024-11-15T22:40:26,460 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e611192d6313,42145,1731710388091 already deleted, retry=false 2024-11-15T22:40:26,460 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e611192d6313,42145,1731710388091 expired; onlineServers=0 2024-11-15T22:40:26,460 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e611192d6313,45633,1731710387911' ***** 2024-11-15T22:40:26,461 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T22:40:26,461 INFO [M:0;e611192d6313:45633 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T22:40:26,461 INFO [M:0;e611192d6313:45633 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T22:40:26,461 DEBUG [M:0;e611192d6313:45633 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T22:40:26,461 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T22:40:26,461 DEBUG [M:0;e611192d6313:45633 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T22:40:26,461 DEBUG [master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710388427 {}] cleaner.HFileCleaner(306): Exit Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710388427,5,FailOnTimeoutGroup] 2024-11-15T22:40:26,461 DEBUG [master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710388426 {}] cleaner.HFileCleaner(306): Exit Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710388426,5,FailOnTimeoutGroup] 2024-11-15T22:40:26,462 INFO [M:0;e611192d6313:45633 {}] hbase.ChoreService(370): Chore service for: master/e611192d6313:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T22:40:26,462 INFO [M:0;e611192d6313:45633 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T22:40:26,462 DEBUG [M:0;e611192d6313:45633 {}] master.HMaster(1795): Stopping service threads 2024-11-15T22:40:26,462 INFO [M:0;e611192d6313:45633 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T22:40:26,462 INFO [M:0;e611192d6313:45633 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T22:40:26,462 INFO [M:0;e611192d6313:45633 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T22:40:26,463 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T22:40:26,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T22:40:26,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:26,471 DEBUG [M:0;e611192d6313:45633 {}] zookeeper.ZKUtil(347): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T22:40:26,471 WARN [M:0;e611192d6313:45633 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T22:40:26,472 INFO [M:0;e611192d6313:45633 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/.lastflushedseqids 2024-11-15T22:40:26,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741883_1059 (size=228) 2024-11-15T22:40:26,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741883_1059 (size=228) 2024-11-15T22:40:26,481 INFO [M:0;e611192d6313:45633 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T22:40:26,481 INFO [M:0;e611192d6313:45633 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T22:40:26,481 DEBUG [M:0;e611192d6313:45633 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T22:40:26,481 INFO [M:0;e611192d6313:45633 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:40:26,481 DEBUG [M:0;e611192d6313:45633 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:40:26,481 DEBUG [M:0;e611192d6313:45633 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T22:40:26,481 DEBUG [M:0;e611192d6313:45633 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:40:26,482 INFO [M:0;e611192d6313:45633 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.36 KB 2024-11-15T22:40:26,496 DEBUG [M:0;e611192d6313:45633 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/16fc59d2f3fd4790b758db51a5bb39c6 is 82, key is hbase:meta,,1/info:regioninfo/1731710389078/Put/seqid=0 2024-11-15T22:40:26,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741884_1060 (size=5672) 2024-11-15T22:40:26,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741884_1060 (size=5672) 2024-11-15T22:40:26,500 INFO [M:0;e611192d6313:45633 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/16fc59d2f3fd4790b758db51a5bb39c6 2024-11-15T22:40:26,518 DEBUG [M:0;e611192d6313:45633 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fb93397078624d8a92311fa05f03501d is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731710389606/Put/seqid=0 2024-11-15T22:40:26,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741885_1061 (size=7089) 2024-11-15T22:40:26,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741885_1061 (size=7089) 2024-11-15T22:40:26,522 INFO [M:0;e611192d6313:45633 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fb93397078624d8a92311fa05f03501d 2024-11-15T22:40:26,526 INFO [M:0;e611192d6313:45633 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fb93397078624d8a92311fa05f03501d 2024-11-15T22:40:26,538 DEBUG [M:0;e611192d6313:45633 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/42788e7f8d2c47e5bccb1f4bdab01b28 is 69, key is e611192d6313,42145,1731710388091/rs:state/1731710388562/Put/seqid=0 2024-11-15T22:40:26,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741886_1062 (size=5156) 2024-11-15T22:40:26,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741886_1062 (size=5156) 2024-11-15T22:40:26,543 INFO [M:0;e611192d6313:45633 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/42788e7f8d2c47e5bccb1f4bdab01b28 2024-11-15T22:40:26,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:40:26,550 INFO [RS:0;e611192d6313:42145 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T22:40:26,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42145-0x10140a782610001, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:40:26,550 INFO [RS:0;e611192d6313:42145 {}] regionserver.HRegionServer(1031): Exiting; stopping=e611192d6313,42145,1731710388091; zookeeper connection closed. 2024-11-15T22:40:26,550 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7fbbe09a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7fbbe09a 2024-11-15T22:40:26,550 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T22:40:26,561 DEBUG [M:0;e611192d6313:45633 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/48ada96140834d518cdb51ad259b91dd is 52, key is load_balancer_on/state:d/1731710389236/Put/seqid=0 2024-11-15T22:40:26,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741887_1063 (size=5056) 2024-11-15T22:40:26,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741887_1063 (size=5056) 2024-11-15T22:40:26,565 INFO [M:0;e611192d6313:45633 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/48ada96140834d518cdb51ad259b91dd 2024-11-15T22:40:26,569 DEBUG [M:0;e611192d6313:45633 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/16fc59d2f3fd4790b758db51a5bb39c6 as 
hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/16fc59d2f3fd4790b758db51a5bb39c6 2024-11-15T22:40:26,574 INFO [M:0;e611192d6313:45633 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/16fc59d2f3fd4790b758db51a5bb39c6, entries=8, sequenceid=125, filesize=5.5 K 2024-11-15T22:40:26,575 DEBUG [M:0;e611192d6313:45633 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fb93397078624d8a92311fa05f03501d as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fb93397078624d8a92311fa05f03501d 2024-11-15T22:40:26,580 INFO [M:0;e611192d6313:45633 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fb93397078624d8a92311fa05f03501d 2024-11-15T22:40:26,580 INFO [M:0;e611192d6313:45633 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fb93397078624d8a92311fa05f03501d, entries=13, sequenceid=125, filesize=6.9 K 2024-11-15T22:40:26,581 DEBUG [M:0;e611192d6313:45633 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/42788e7f8d2c47e5bccb1f4bdab01b28 as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/42788e7f8d2c47e5bccb1f4bdab01b28 2024-11-15T22:40:26,583 INFO [regionserver/e611192d6313:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T22:40:26,587 INFO [M:0;e611192d6313:45633 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/42788e7f8d2c47e5bccb1f4bdab01b28, entries=1, sequenceid=125, filesize=5.0 K 2024-11-15T22:40:26,589 DEBUG [M:0;e611192d6313:45633 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/48ada96140834d518cdb51ad259b91dd as hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/48ada96140834d518cdb51ad259b91dd 2024-11-15T22:40:26,594 INFO [M:0;e611192d6313:45633 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/7838025a-9897-8d98-1cea-9c03e55ca789/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/48ada96140834d518cdb51ad259b91dd, entries=1, sequenceid=125, filesize=4.9 K 2024-11-15T22:40:26,595 INFO [M:0;e611192d6313:45633 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=125, compaction requested=false 2024-11-15T22:40:26,596 INFO 
[M:0;e611192d6313:45633 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:40:26,596 DEBUG [M:0;e611192d6313:45633 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731710426481Disabling compacts and flushes for region at 1731710426481Disabling writes for close at 1731710426481Obtaining lock to block concurrent updates at 1731710426482 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731710426482Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1731710426482Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731710426482Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731710426483 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731710426496 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731710426496Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731710426504 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731710426517 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731710426517Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731710426526 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731710426538 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731710426538Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731710426546 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731710426560 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731710426560Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@731f8d7f: reopening flushed file at 1731710426569 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45b72e57: reopening flushed file at 1731710426574 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@696ec78f: reopening flushed file at 1731710426580 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3275d984: reopening flushed file at 1731710426588 (+8 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=125, compaction requested=false at 1731710426595 (+7 ms)Writing region close event to WAL at 1731710426596 (+1 ms)Closed at 1731710426596 2024-11-15T22:40:26,597 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,597 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,597 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,597 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,597 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:26,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45387 is added to blk_1073741830_1006 (size=61320) 2024-11-15T22:40:26,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33291 is added to blk_1073741830_1006 (size=61320) 2024-11-15T22:40:26,599 INFO [M:0;e611192d6313:45633 {}] 
flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T22:40:26,599 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T22:40:26,599 INFO [M:0;e611192d6313:45633 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45633 2024-11-15T22:40:26,599 INFO [M:0;e611192d6313:45633 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T22:40:26,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:40:26,712 INFO [M:0;e611192d6313:45633 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T22:40:26,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45633-0x10140a782610000, quorum=127.0.0.1:51416, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:40:26,717 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2c49e976{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:40:26,718 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@48b1d1cb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:40:26,718 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:40:26,718 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41b3b520{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:40:26,718 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@665a776{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/hadoop.log.dir/,STOPPED} 2024-11-15T22:40:26,721 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T22:40:26,721 WARN [BP-87975701-172.17.0.3-1731710385574 heartbeating to localhost/127.0.0.1:40299 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:40:26,721 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:40:26,721 WARN [BP-87975701-172.17.0.3-1731710385574 heartbeating to localhost/127.0.0.1:40299 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-87975701-172.17.0.3-1731710385574 (Datanode Uuid af750f05-61e3-4426-8a6e-d9e39f6a1730) service to localhost/127.0.0.1:40299 2024-11-15T22:40:26,722 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/cluster_cab337df-ff59-49d3-a639-80c31a24b544/data/data3/current/BP-87975701-172.17.0.3-1731710385574 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:40:26,722 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/cluster_cab337df-ff59-49d3-a639-80c31a24b544/data/data4/current/BP-87975701-172.17.0.3-1731710385574 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:40:26,722 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:40:26,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f61588{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:40:26,725 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6a5db76d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:40:26,725 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:40:26,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5773e0ea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:40:26,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49e87d3f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/hadoop.log.dir/,STOPPED} 2024-11-15T22:40:26,727 WARN [BP-87975701-172.17.0.3-1731710385574 heartbeating to localhost/127.0.0.1:40299 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:40:26,727 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T22:40:26,727 WARN [BP-87975701-172.17.0.3-1731710385574 heartbeating to localhost/127.0.0.1:40299 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-87975701-172.17.0.3-1731710385574 (Datanode Uuid 267c508b-10f6-4bef-85dc-84943fe942bf) service to localhost/127.0.0.1:40299 2024-11-15T22:40:26,727 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:40:26,728 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/cluster_cab337df-ff59-49d3-a639-80c31a24b544/data/data1/current/BP-87975701-172.17.0.3-1731710385574 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:40:26,728 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/cluster_cab337df-ff59-49d3-a639-80c31a24b544/data/data2/current/BP-87975701-172.17.0.3-1731710385574 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:40:26,728 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:40:26,734 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7884e2a5{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T22:40:26,735 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f87fe6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:40:26,735 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:40:26,735 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@191b8d86{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:40:26,735 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79ecb530{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/hadoop.log.dir/,STOPPED} 2024-11-15T22:40:26,741 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T22:40:26,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T22:40:26,789 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=232 (was 208) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40299 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40299 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:40299 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40299 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40299 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:40299 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40299 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40299 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=518 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=68 (was 86), ProcessCount=11 (was 11), AvailableMemoryMB=3873 (was 3925) 2024-11-15T22:40:26,797 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=232, OpenFileDescriptor=518, MaxFileDescriptor=1048576, SystemLoadAverage=68, ProcessCount=11, AvailableMemoryMB=3873 2024-11-15T22:40:26,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T22:40:26,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/hadoop.log.dir so I do NOT create it in target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c 2024-11-15T22:40:26,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c5623668-9a91-6762-1fca-76fde62cf043/hadoop.tmp.dir so I do NOT create it in target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c 2024-11-15T22:40:26,797 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/cluster_2e2cd3ae-1b73-fbc0-9910-a5385aa1f816, deleteOnExit=true 2024-11-15T22:40:26,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T22:40:26,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/test.cache.data in system properties and HBase conf 2024-11-15T22:40:26,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T22:40:26,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/hadoop.log.dir in system properties and HBase conf 2024-11-15T22:40:26,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T22:40:26,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T22:40:26,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T22:40:26,797 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-15T22:40:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T22:40:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T22:40:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T22:40:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T22:40:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T22:40:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T22:40:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T22:40:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T22:40:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T22:40:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/nfs.dump.dir in system properties and HBase conf 2024-11-15T22:40:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/java.io.tmpdir in system properties and HBase conf 2024-11-15T22:40:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T22:40:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T22:40:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T22:40:26,809 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T22:40:27,148 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:40:27,151 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:40:27,152 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:40:27,152 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:40:27,152 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T22:40:27,153 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:40:27,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e2fef96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:40:27,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23266789{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:40:27,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:27,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:27,243 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7216654a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/java.io.tmpdir/jetty-localhost-33167-hadoop-hdfs-3_4_1-tests_jar-_-any-15433292836757637746/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T22:40:27,244 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ec9da7e{HTTP/1.1, (http/1.1)}{localhost:33167} 2024-11-15T22:40:27,244 INFO [Time-limited test {}] server.Server(415): Started @285402ms 2024-11-15T22:40:27,254 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T22:40:27,489 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:40:27,492 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:40:27,493 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:40:27,493 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:40:27,493 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T22:40:27,494 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49e6dd92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:40:27,494 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6bfe0bbd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:40:27,591 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@627a202d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/java.io.tmpdir/jetty-localhost-46221-hadoop-hdfs-3_4_1-tests_jar-_-any-8549509683782527596/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:40:27,591 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22ed154c{HTTP/1.1, (http/1.1)}{localhost:46221} 2024-11-15T22:40:27,591 INFO [Time-limited test {}] server.Server(415): Started @285749ms 2024-11-15T22:40:27,592 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T22:40:27,615 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T22:40:27,618 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T22:40:27,619 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T22:40:27,619 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T22:40:27,619 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T22:40:27,619 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9279e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/hadoop.log.dir/,AVAILABLE} 2024-11-15T22:40:27,619 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34604a81{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T22:40:27,712 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7966f06a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/java.io.tmpdir/jetty-localhost-45239-hadoop-hdfs-3_4_1-tests_jar-_-any-5161576210927318466/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:40:27,712 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4c6abd2c{HTTP/1.1, (http/1.1)}{localhost:45239} 2024-11-15T22:40:27,712 INFO [Time-limited test {}] server.Server(415): Started @285870ms 2024-11-15T22:40:27,713 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
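The recurring "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings in this log come from a reflective isFileClosed probe whose real failure is wrapped in InvocationTargetException, which is why the outer line reads "InvocationTargetException: null" and the actual reason only appears under "Caused by:". The sketch below is not the HBase RecoverLeaseFSUtils code; it is a minimal, self-contained illustration of that wrapping, with a made-up FakeDfsClient standing in for a DFSClient whose filesystem has already been closed.

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class IsFileClosedProbe {

  /** Stand-in for a DFS client whose underlying filesystem is already closed. */
  public static class FakeDfsClient {
    public boolean isFileClosed(String path) throws IOException {
      // Mirrors the effect of DFSClient.checkOpen() after the client is closed.
      throw new IOException("Filesystem closed");
    }
  }

  public static void main(String[] args) throws Exception {
    FakeDfsClient client = new FakeDfsClient();
    Method probe = FakeDfsClient.class.getMethod("isFileClosed", String.class);
    try {
      probe.invoke(client, "/user/jenkins/test-data/example.wal");
    } catch (InvocationTargetException e) {
      // Reflection wraps the checked IOException, so logging the outer
      // exception prints "InvocationTargetException: null" and the real
      // reason only shows up in the "Caused by:" section.
      System.out.println("wrapper: " + e);
      System.out.println("cause:   " + e.getCause());
    }
  }
}
```

Running it prints the wrapper first and the underlying cause second, mirroring the order of the "Failed invocation" warning and its "Caused by:" section above.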
2024-11-15T22:40:28,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:28,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:28,402 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T22:40:28,402 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T22:40:28,403 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T22:40:28,404 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-15T22:40:28,725 WARN [Thread-2504 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/cluster_2e2cd3ae-1b73-fbc0-9910-a5385aa1f816/data/data1/current/BP-197085225-172.17.0.3-1731710426813/current, will proceed with Du for space computation calculation, 2024-11-15T22:40:28,725 WARN [Thread-2505 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/cluster_2e2cd3ae-1b73-fbc0-9910-a5385aa1f816/data/data2/current/BP-197085225-172.17.0.3-1731710426813/current, will proceed with Du for space computation calculation, 2024-11-15T22:40:28,741 WARN [Thread-2468 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T22:40:28,743 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb40a93cc8898eb49 with lease ID 0xa2eed1c4ba79aaac: Processing first storage report for DS-7609d11d-05bd-49a2-b9b4-b7e0c0baa2ea from datanode DatanodeRegistration(127.0.0.1:39019, datanodeUuid=08c7a6d9-8a7c-4faa-bd17-4abb77cf7c6f, infoPort=37693, infoSecurePort=0, ipcPort=42819, storageInfo=lv=-57;cid=testClusterID;nsid=20045911;c=1731710426813) 2024-11-15T22:40:28,743 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb40a93cc8898eb49 with lease ID 0xa2eed1c4ba79aaac: from storage DS-7609d11d-05bd-49a2-b9b4-b7e0c0baa2ea node DatanodeRegistration(127.0.0.1:39019, datanodeUuid=08c7a6d9-8a7c-4faa-bd17-4abb77cf7c6f, infoPort=37693, infoSecurePort=0, ipcPort=42819, storageInfo=lv=-57;cid=testClusterID;nsid=20045911;c=1731710426813), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:40:28,743 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb40a93cc8898eb49 with lease ID 0xa2eed1c4ba79aaac: Processing first storage report for DS-91eebca7-ba16-4b6c-81ab-136f57a17ed1 from datanode DatanodeRegistration(127.0.0.1:39019, datanodeUuid=08c7a6d9-8a7c-4faa-bd17-4abb77cf7c6f, infoPort=37693, infoSecurePort=0, ipcPort=42819, storageInfo=lv=-57;cid=testClusterID;nsid=20045911;c=1731710426813) 2024-11-15T22:40:28,743 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb40a93cc8898eb49 with lease ID 0xa2eed1c4ba79aaac: from storage DS-91eebca7-ba16-4b6c-81ab-136f57a17ed1 node DatanodeRegistration(127.0.0.1:39019, datanodeUuid=08c7a6d9-8a7c-4faa-bd17-4abb77cf7c6f, infoPort=37693, infoSecurePort=0, ipcPort=42819, storageInfo=lv=-57;cid=testClusterID;nsid=20045911;c=1731710426813), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:40:28,863 WARN [Thread-2516 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/cluster_2e2cd3ae-1b73-fbc0-9910-a5385aa1f816/data/data4/current/BP-197085225-172.17.0.3-1731710426813/current, will proceed with Du for space computation calculation, 2024-11-15T22:40:28,863 WARN [Thread-2515 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/cluster_2e2cd3ae-1b73-fbc0-9910-a5385aa1f816/data/data3/current/BP-197085225-172.17.0.3-1731710426813/current, will proceed with Du for space computation calculation, 2024-11-15T22:40:28,884 WARN [Thread-2491 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T22:40:28,886 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x44ffb21a1f60bc42 with lease ID 0xa2eed1c4ba79aaad: Processing first storage report for DS-e6f0b707-8463-4042-8e50-e4d33e08c225 from datanode DatanodeRegistration(127.0.0.1:40007, datanodeUuid=93025969-7f44-490a-8e06-4c933f1de0b1, infoPort=34895, infoSecurePort=0, ipcPort=42571, storageInfo=lv=-57;cid=testClusterID;nsid=20045911;c=1731710426813) 2024-11-15T22:40:28,886 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x44ffb21a1f60bc42 with lease ID 0xa2eed1c4ba79aaad: from storage DS-e6f0b707-8463-4042-8e50-e4d33e08c225 node DatanodeRegistration(127.0.0.1:40007, datanodeUuid=93025969-7f44-490a-8e06-4c933f1de0b1, infoPort=34895, infoSecurePort=0, ipcPort=42571, storageInfo=lv=-57;cid=testClusterID;nsid=20045911;c=1731710426813), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:40:28,886 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x44ffb21a1f60bc42 with lease ID 0xa2eed1c4ba79aaad: Processing first storage report for DS-5d4aff39-c682-4027-9a8a-14641a7b6537 from datanode DatanodeRegistration(127.0.0.1:40007, datanodeUuid=93025969-7f44-490a-8e06-4c933f1de0b1, infoPort=34895, infoSecurePort=0, ipcPort=42571, storageInfo=lv=-57;cid=testClusterID;nsid=20045911;c=1731710426813) 2024-11-15T22:40:28,886 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x44ffb21a1f60bc42 with lease ID 0xa2eed1c4ba79aaad: from storage DS-5d4aff39-c682-4027-9a8a-14641a7b6537 node DatanodeRegistration(127.0.0.1:40007, datanodeUuid=93025969-7f44-490a-8e06-4c933f1de0b1, infoPort=34895, infoSecurePort=0, ipcPort=42571, storageInfo=lv=-57;cid=testClusterID;nsid=20045911;c=1731710426813), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T22:40:28,941 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c 2024-11-15T22:40:28,943 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/cluster_2e2cd3ae-1b73-fbc0-9910-a5385aa1f816/zookeeper_0, clientPort=51445, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/cluster_2e2cd3ae-1b73-fbc0-9910-a5385aa1f816/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/cluster_2e2cd3ae-1b73-fbc0-9910-a5385aa1f816/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T22:40:28,944 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51445 2024-11-15T22:40:28,944 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:40:28,946 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:40:28,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741825_1001 (size=7) 2024-11-15T22:40:28,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741825_1001 (size=7) 2024-11-15T22:40:28,955 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff with version=8 2024-11-15T22:40:28,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39371/user/jenkins/test-data/2ed060d0-b118-5ee0-1cdc-08c0d67d57b9/hbase-staging 2024-11-15T22:40:28,957 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e611192d6313:0 server-side Connection retries=45 2024-11-15T22:40:28,957 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:40:28,957 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T22:40:28,957 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T22:40:28,957 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:40:28,957 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T22:40:28,957 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T22:40:28,957 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T22:40:28,958 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34029 2024-11-15T22:40:28,959 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34029 connecting to ZooKeeper ensemble=127.0.0.1:51445 2024-11-15T22:40:29,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:340290x0, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T22:40:29,110 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34029-0x10140a822ba0000 connected 2024-11-15T22:40:29,197 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:40:29,198 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:40:29,200 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:40:29,200 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff, hbase.cluster.distributed=false 2024-11-15T22:40:29,202 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T22:40:29,202 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34029 2024-11-15T22:40:29,202 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34029 2024-11-15T22:40:29,202 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34029 2024-11-15T22:40:29,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34029 2024-11-15T22:40:29,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34029 2024-11-15T22:40:29,220 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e611192d6313:0 server-side Connection retries=45 2024-11-15T22:40:29,220 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:40:29,220 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T22:40:29,220 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T22:40:29,220 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T22:40:29,220 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T22:40:29,220 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T22:40:29,221 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T22:40:29,221 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37867 2024-11-15T22:40:29,222 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37867 connecting to ZooKeeper ensemble=127.0.0.1:51445 2024-11-15T22:40:29,222 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:40:29,223 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:40:29,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:29,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:29,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:378670x0, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T22:40:29,233 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37867-0x10140a822ba0001 connected 2024-11-15T22:40:29,233 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:40:29,234 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T22:40:29,234 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T22:40:29,235 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T22:40:29,236 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T22:40:29,236 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37867 2024-11-15T22:40:29,236 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37867 2024-11-15T22:40:29,236 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37867 2024-11-15T22:40:29,237 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37867 2024-11-15T22:40:29,237 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37867 2024-11-15T22:40:29,249 DEBUG [M:0;e611192d6313:34029 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e611192d6313:34029 2024-11-15T22:40:29,250 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e611192d6313,34029,1731710428957 2024-11-15T22:40:29,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:40:29,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:40:29,261 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e611192d6313,34029,1731710428957 2024-11-15T22:40:29,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T22:40:29,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:29,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:29,271 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T22:40:29,272 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e611192d6313,34029,1731710428957 from backup master directory 2024-11-15T22:40:29,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:40:29,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e611192d6313,34029,1731710428957 2024-11-15T22:40:29,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T22:40:29,282 WARN [master/e611192d6313:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE 
not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T22:40:29,282 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e611192d6313,34029,1731710428957 2024-11-15T22:40:29,288 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/hbase.id] with ID: 6e40f1d3-fe88-4e5e-a522-aafee0ced7c5 2024-11-15T22:40:29,288 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/.tmp/hbase.id 2024-11-15T22:40:29,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741826_1002 (size=42) 2024-11-15T22:40:29,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741826_1002 (size=42) 2024-11-15T22:40:29,298 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/.tmp/hbase.id]:[hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/hbase.id] 2024-11-15T22:40:29,310 INFO [master/e611192d6313:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:40:29,310 INFO [master/e611192d6313:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T22:40:29,311 INFO [master/e611192d6313:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-15T22:40:29,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:29,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:29,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741827_1003 (size=196) 2024-11-15T22:40:29,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741827_1003 (size=196) 2024-11-15T22:40:29,329 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T22:40:29,330 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T22:40:29,330 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:40:29,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741828_1004 (size=1189) 2024-11-15T22:40:29,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741828_1004 (size=1189) 2024-11-15T22:40:29,337 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store 2024-11-15T22:40:29,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741829_1005 (size=34) 2024-11-15T22:40:29,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741829_1005 (size=34) 2024-11-15T22:40:29,343 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:40:29,343 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T22:40:29,343 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:40:29,343 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:40:29,343 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T22:40:29,343 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:40:29,343 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T22:40:29,343 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731710429343Disabling compacts and flushes for region at 1731710429343Disabling writes for close at 1731710429343Writing region close event to WAL at 1731710429343Closed at 1731710429343 2024-11-15T22:40:29,344 WARN [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/.initializing 2024-11-15T22:40:29,345 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/WALs/e611192d6313,34029,1731710428957 2024-11-15T22:40:29,348 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C34029%2C1731710428957, suffix=, logDir=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/WALs/e611192d6313,34029,1731710428957, archiveDir=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/oldWALs, maxLogs=10 2024-11-15T22:40:29,348 INFO [master/e611192d6313:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C34029%2C1731710428957.1731710429348 2024-11-15T22:40:29,353 INFO [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/WALs/e611192d6313,34029,1731710428957/e611192d6313%2C34029%2C1731710428957.1731710429348 2024-11-15T22:40:29,354 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37693:37693),(127.0.0.1/127.0.0.1:34895:34895)] 2024-11-15T22:40:29,354 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:40:29,355 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:40:29,355 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:40:29,355 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:40:29,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:40:29,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T22:40:29,357 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:29,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:40:29,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:40:29,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T22:40:29,358 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:29,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:40:29,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:40:29,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T22:40:29,359 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:29,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:40:29,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:40:29,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T22:40:29,360 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:29,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T22:40:29,361 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:40:29,362 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:40:29,362 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:40:29,363 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:40:29,363 DEBUG [master/e611192d6313:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:40:29,363 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T22:40:29,364 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T22:40:29,366 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:40:29,367 INFO [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=730566, jitterRate=-0.07103849947452545}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T22:40:29,367 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731710429355Initializing all the Stores at 1731710429355Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710429355Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710429356 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710429356Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710429356Cleaning up temporary data from old regions at 1731710429363 (+7 ms)Region opened successfully at 1731710429367 (+4 ms) 2024-11-15T22:40:29,367 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T22:40:29,370 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@717417a4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e611192d6313/172.17.0.3:0 2024-11-15T22:40:29,371 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T22:40:29,371 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T22:40:29,371 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T22:40:29,371 INFO [master/e611192d6313:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T22:40:29,372 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T22:40:29,372 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T22:40:29,372 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T22:40:29,374 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T22:40:29,374 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T22:40:29,386 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T22:40:29,387 INFO [master/e611192d6313:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T22:40:29,387 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T22:40:29,397 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T22:40:29,397 INFO [master/e611192d6313:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T22:40:29,398 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T22:40:29,407 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T22:40:29,408 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T22:40:29,418 DEBUG 
[master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T22:40:29,420 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T22:40:29,433 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T22:40:29,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T22:40:29,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T22:40:29,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:29,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:29,444 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e611192d6313,34029,1731710428957, sessionid=0x10140a822ba0000, setting cluster-up flag (Was=false) 2024-11-15T22:40:29,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:29,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:29,496 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T22:40:29,497 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e611192d6313,34029,1731710428957 2024-11-15T22:40:29,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:29,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:29,643 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T22:40:29,646 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e611192d6313,34029,1731710428957 2024-11-15T22:40:29,649 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T22:40:29,651 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T22:40:29,652 INFO [master/e611192d6313:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T22:40:29,652 INFO [master/e611192d6313:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T22:40:29,652 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e611192d6313,34029,1731710428957 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T22:40:29,654 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:40:29,654 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:40:29,655 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:40:29,655 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e611192d6313:0, corePoolSize=5, maxPoolSize=5 2024-11-15T22:40:29,655 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e611192d6313:0, corePoolSize=10, maxPoolSize=10 2024-11-15T22:40:29,655 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:40:29,655 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e611192d6313:0, corePoolSize=2, maxPoolSize=2 2024-11-15T22:40:29,655 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e611192d6313:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T22:40:29,656 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731710459656 2024-11-15T22:40:29,656 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T22:40:29,656 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T22:40:29,656 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T22:40:29,656 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T22:40:29,657 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T22:40:29,657 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T22:40:29,657 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:29,657 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:40:29,657 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T22:40:29,657 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T22:40:29,657 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T22:40:29,657 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T22:40:29,658 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T22:40:29,658 INFO [master/e611192d6313:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T22:40:29,658 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710429658,5,FailOnTimeoutGroup] 2024-11-15T22:40:29,658 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710429658,5,FailOnTimeoutGroup] 2024-11-15T22:40:29,658 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:29,658 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-15T22:40:29,658 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T22:40:29,658 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:29,658 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:29,658 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T22:40:29,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741831_1007 (size=1321) 2024-11-15T22:40:29,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741831_1007 (size=1321) 2024-11-15T22:40:29,665 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T22:40:29,665 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff 2024-11-15T22:40:29,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741832_1008 (size=32) 2024-11-15T22:40:29,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741832_1008 (size=32) 2024-11-15T22:40:29,673 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:40:29,674 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T22:40:29,675 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T22:40:29,675 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:29,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:40:29,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-11-15T22:40:29,676 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T22:40:29,676 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:29,676 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:40:29,677 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T22:40:29,677 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T22:40:29,677 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:29,678 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:40:29,678 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T22:40:29,678 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T22:40:29,678 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:29,679 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:40:29,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T22:40:29,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/data/hbase/meta/1588230740 2024-11-15T22:40:29,680 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/data/hbase/meta/1588230740 2024-11-15T22:40:29,681 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T22:40:29,681 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T22:40:29,681 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-15T22:40:29,682 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T22:40:29,684 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T22:40:29,684 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=831100, jitterRate=0.05679868161678314}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T22:40:29,685 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731710429673Initializing all the Stores at 1731710429674 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710429674Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710429674Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710429674Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710429674Cleaning up temporary data from old regions at 1731710429681 (+7 ms)Region opened successfully at 1731710429685 (+4 ms) 2024-11-15T22:40:29,685 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T22:40:29,685 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T22:40:29,685 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T22:40:29,685 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T22:40:29,685 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T22:40:29,685 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T22:40:29,685 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731710429685Disabling compacts and flushes for region at 1731710429685Disabling writes for close at 1731710429685Writing region close 
event to WAL at 1731710429685Closed at 1731710429685 2024-11-15T22:40:29,686 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:40:29,687 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T22:40:29,687 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T22:40:29,688 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T22:40:29,688 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T22:40:29,740 INFO [RS:0;e611192d6313:37867 {}] regionserver.HRegionServer(746): ClusterId : 6e40f1d3-fe88-4e5e-a522-aafee0ced7c5 2024-11-15T22:40:29,740 DEBUG [RS:0;e611192d6313:37867 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T22:40:29,750 DEBUG [RS:0;e611192d6313:37867 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T22:40:29,750 DEBUG [RS:0;e611192d6313:37867 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T22:40:29,760 DEBUG [RS:0;e611192d6313:37867 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T22:40:29,761 DEBUG [RS:0;e611192d6313:37867 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47b594f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e611192d6313/172.17.0.3:0 2024-11-15T22:40:29,772 DEBUG [RS:0;e611192d6313:37867 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e611192d6313:37867 2024-11-15T22:40:29,772 INFO [RS:0;e611192d6313:37867 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T22:40:29,772 INFO [RS:0;e611192d6313:37867 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T22:40:29,772 DEBUG [RS:0;e611192d6313:37867 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-15T22:40:29,773 INFO [RS:0;e611192d6313:37867 {}] regionserver.HRegionServer(2659): reportForDuty to master=e611192d6313,34029,1731710428957 with port=37867, startcode=1731710429220 2024-11-15T22:40:29,773 DEBUG [RS:0;e611192d6313:37867 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T22:40:29,775 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39207, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T22:40:29,775 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34029 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e611192d6313,37867,1731710429220 2024-11-15T22:40:29,775 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34029 {}] master.ServerManager(517): Registering regionserver=e611192d6313,37867,1731710429220 2024-11-15T22:40:29,777 DEBUG [RS:0;e611192d6313:37867 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff 2024-11-15T22:40:29,777 DEBUG [RS:0;e611192d6313:37867 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41451 2024-11-15T22:40:29,777 DEBUG [RS:0;e611192d6313:37867 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T22:40:29,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T22:40:29,787 DEBUG [RS:0;e611192d6313:37867 {}] zookeeper.ZKUtil(111): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e611192d6313,37867,1731710429220 2024-11-15T22:40:29,787 WARN [RS:0;e611192d6313:37867 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T22:40:29,787 INFO [RS:0;e611192d6313:37867 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:40:29,787 DEBUG [RS:0;e611192d6313:37867 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/WALs/e611192d6313,37867,1731710429220 2024-11-15T22:40:29,787 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e611192d6313,37867,1731710429220] 2024-11-15T22:40:29,790 INFO [RS:0;e611192d6313:37867 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T22:40:29,792 INFO [RS:0;e611192d6313:37867 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T22:40:29,793 INFO [RS:0;e611192d6313:37867 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T22:40:29,793 INFO [RS:0;e611192d6313:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-15T22:40:29,793 INFO [RS:0;e611192d6313:37867 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T22:40:29,794 INFO [RS:0;e611192d6313:37867 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T22:40:29,794 INFO [RS:0;e611192d6313:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:29,794 DEBUG [RS:0;e611192d6313:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:40:29,794 DEBUG [RS:0;e611192d6313:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:40:29,794 DEBUG [RS:0;e611192d6313:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:40:29,794 DEBUG [RS:0;e611192d6313:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:40:29,794 DEBUG [RS:0;e611192d6313:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:40:29,794 DEBUG [RS:0;e611192d6313:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e611192d6313:0, corePoolSize=2, maxPoolSize=2 2024-11-15T22:40:29,794 DEBUG [RS:0;e611192d6313:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:40:29,794 DEBUG [RS:0;e611192d6313:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:40:29,794 DEBUG [RS:0;e611192d6313:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:40:29,795 DEBUG [RS:0;e611192d6313:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:40:29,795 DEBUG [RS:0;e611192d6313:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:40:29,795 DEBUG [RS:0;e611192d6313:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e611192d6313:0, corePoolSize=1, maxPoolSize=1 2024-11-15T22:40:29,795 DEBUG [RS:0;e611192d6313:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e611192d6313:0, corePoolSize=3, maxPoolSize=3 2024-11-15T22:40:29,795 DEBUG [RS:0;e611192d6313:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e611192d6313:0, corePoolSize=3, maxPoolSize=3 2024-11-15T22:40:29,795 INFO [RS:0;e611192d6313:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-15T22:40:29,795 INFO [RS:0;e611192d6313:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:29,795 INFO [RS:0;e611192d6313:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:29,795 INFO [RS:0;e611192d6313:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:29,796 INFO [RS:0;e611192d6313:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:29,796 INFO [RS:0;e611192d6313:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,37867,1731710429220-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T22:40:29,812 INFO [RS:0;e611192d6313:37867 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T22:40:29,813 INFO [RS:0;e611192d6313:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,37867,1731710429220-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:29,813 INFO [RS:0;e611192d6313:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:29,813 INFO [RS:0;e611192d6313:37867 {}] regionserver.Replication(171): e611192d6313,37867,1731710429220 started 2024-11-15T22:40:29,824 INFO [RS:0;e611192d6313:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:29,824 INFO [RS:0;e611192d6313:37867 {}] regionserver.HRegionServer(1482): Serving as e611192d6313,37867,1731710429220, RpcServer on e611192d6313/172.17.0.3:37867, sessionid=0x10140a822ba0001 2024-11-15T22:40:29,824 DEBUG [RS:0;e611192d6313:37867 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T22:40:29,824 DEBUG [RS:0;e611192d6313:37867 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e611192d6313,37867,1731710429220 2024-11-15T22:40:29,824 DEBUG [RS:0;e611192d6313:37867 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e611192d6313,37867,1731710429220' 2024-11-15T22:40:29,824 DEBUG [RS:0;e611192d6313:37867 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T22:40:29,825 DEBUG [RS:0;e611192d6313:37867 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T22:40:29,825 DEBUG [RS:0;e611192d6313:37867 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T22:40:29,825 DEBUG [RS:0;e611192d6313:37867 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T22:40:29,825 DEBUG [RS:0;e611192d6313:37867 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e611192d6313,37867,1731710429220 2024-11-15T22:40:29,825 DEBUG [RS:0;e611192d6313:37867 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e611192d6313,37867,1731710429220' 2024-11-15T22:40:29,825 DEBUG [RS:0;e611192d6313:37867 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T22:40:29,826 DEBUG 
[RS:0;e611192d6313:37867 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T22:40:29,826 DEBUG [RS:0;e611192d6313:37867 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T22:40:29,826 INFO [RS:0;e611192d6313:37867 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T22:40:29,826 INFO [RS:0;e611192d6313:37867 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T22:40:29,839 WARN [e611192d6313:34029 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-15T22:40:29,928 INFO [RS:0;e611192d6313:37867 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C37867%2C1731710429220, suffix=, logDir=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/WALs/e611192d6313,37867,1731710429220, archiveDir=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/oldWALs, maxLogs=32 2024-11-15T22:40:29,929 INFO [RS:0;e611192d6313:37867 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C37867%2C1731710429220.1731710429929 2024-11-15T22:40:29,935 INFO [RS:0;e611192d6313:37867 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/WALs/e611192d6313,37867,1731710429220/e611192d6313%2C37867%2C1731710429220.1731710429929 2024-11-15T22:40:29,936 DEBUG [RS:0;e611192d6313:37867 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37693:37693),(127.0.0.1/127.0.0.1:34895:34895)] 2024-11-15T22:40:30,089 DEBUG [e611192d6313:34029 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T22:40:30,090 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e611192d6313,37867,1731710429220 2024-11-15T22:40:30,093 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e611192d6313,37867,1731710429220, state=OPENING 2024-11-15T22:40:30,106 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T22:40:30,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:30,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:30,199 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T22:40:30,200 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:40:30,200 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:40:30,200 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e611192d6313,37867,1731710429220}] 2024-11-15T22:40:30,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,44569,1731710246574/e611192d6313%2C44569%2C1731710246574.meta.1731710247599.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:30,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41079/user/jenkins/test-data/51c20d6b-8c92-4bf7-c6cb-446a46087c25/WALs/e611192d6313,45181,1731710247742/e611192d6313%2C45181%2C1731710247742.1731710247973 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T22:40:30,356 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T22:40:30,362 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52049, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T22:40:30,367 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T22:40:30,367 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:40:30,369 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e611192d6313%2C37867%2C1731710429220.meta, suffix=.meta, logDir=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/WALs/e611192d6313,37867,1731710429220, archiveDir=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/oldWALs, maxLogs=32 2024-11-15T22:40:30,369 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e611192d6313%2C37867%2C1731710429220.meta.1731710430369.meta 2024-11-15T22:40:30,377 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/WALs/e611192d6313,37867,1731710429220/e611192d6313%2C37867%2C1731710429220.meta.1731710430369.meta 2024-11-15T22:40:30,380 DEBUG 
[RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34895:34895),(127.0.0.1/127.0.0.1:37693:37693)] 2024-11-15T22:40:30,381 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T22:40:30,381 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T22:40:30,381 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T22:40:30,381 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-15T22:40:30,381 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T22:40:30,381 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T22:40:30,381 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T22:40:30,381 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T22:40:30,383 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T22:40:30,383 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T22:40:30,383 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:30,384 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:40:30,384 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T22:40:30,384 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T22:40:30,384 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:30,385 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:40:30,385 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T22:40:30,385 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T22:40:30,385 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:30,386 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:40:30,386 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T22:40:30,386 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T22:40:30,386 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T22:40:30,387 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T22:40:30,387 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T22:40:30,387 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/data/hbase/meta/1588230740 2024-11-15T22:40:30,388 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/data/hbase/meta/1588230740 2024-11-15T22:40:30,389 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T22:40:30,389 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T22:40:30,389 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-15T22:40:30,390 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T22:40:30,391 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=748335, jitterRate=-0.048444077372550964}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T22:40:30,391 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T22:40:30,391 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731710430382Writing region info on filesystem at 1731710430382Initializing all the Stores at 1731710430382Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710430382Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710430382Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731710430382Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731710430382Cleaning up temporary data from old regions at 1731710430389 (+7 ms)Running coprocessor post-open hooks at 1731710430391 (+2 ms)Region opened successfully at 1731710430391 2024-11-15T22:40:30,392 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731710430355 2024-11-15T22:40:30,394 DEBUG [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T22:40:30,394 INFO [RS_OPEN_META-regionserver/e611192d6313:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T22:40:30,395 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=e611192d6313,37867,1731710429220 2024-11-15T22:40:30,395 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e611192d6313,37867,1731710429220, state=OPEN 2024-11-15T22:40:30,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T22:40:30,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T22:40:30,435 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e611192d6313,37867,1731710429220 2024-11-15T22:40:30,435 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:40:30,435 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T22:40:30,439 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T22:40:30,439 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e611192d6313,37867,1731710429220 in 235 msec 2024-11-15T22:40:30,442 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T22:40:30,442 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 752 msec 2024-11-15T22:40:30,444 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T22:40:30,444 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T22:40:30,446 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T22:40:30,446 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e611192d6313,37867,1731710429220, seqNum=-1] 2024-11-15T22:40:30,446 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T22:40:30,448 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34641, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T22:40:30,455 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 804 msec 2024-11-15T22:40:30,455 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731710430455, completionTime=-1 2024-11-15T22:40:30,456 INFO 
[master/e611192d6313:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T22:40:30,456 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T22:40:30,459 INFO [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T22:40:30,459 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731710490459 2024-11-15T22:40:30,459 INFO [master/e611192d6313:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731710550459 2024-11-15T22:40:30,459 INFO [master/e611192d6313:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 3 msec 2024-11-15T22:40:30,459 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34029,1731710428957-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:30,459 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34029,1731710428957-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:30,459 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34029,1731710428957-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:30,459 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e611192d6313:34029, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:30,459 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:30,460 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:30,462 DEBUG [master/e611192d6313:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T22:40:30,466 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.184sec 2024-11-15T22:40:30,466 INFO [master/e611192d6313:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T22:40:30,466 INFO [master/e611192d6313:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T22:40:30,466 INFO [master/e611192d6313:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T22:40:30,466 INFO [master/e611192d6313:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-15T22:40:30,466 INFO [master/e611192d6313:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T22:40:30,466 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34029,1731710428957-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T22:40:30,466 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34029,1731710428957-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T22:40:30,470 DEBUG [master/e611192d6313:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T22:40:30,470 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T22:40:30,470 INFO [master/e611192d6313:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e611192d6313,34029,1731710428957-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T22:40:30,542 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f6dbfc5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:40:30,542 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e611192d6313,34029,-1 for getting cluster id 2024-11-15T22:40:30,543 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T22:40:30,545 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6e40f1d3-fe88-4e5e-a522-aafee0ced7c5' 2024-11-15T22:40:30,545 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T22:40:30,546 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6e40f1d3-fe88-4e5e-a522-aafee0ced7c5" 2024-11-15T22:40:30,546 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f7222f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:40:30,546 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e611192d6313,34029,-1] 2024-11-15T22:40:30,546 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T22:40:30,547 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:40:30,548 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46590, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T22:40:30,549 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a016632, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T22:40:30,549 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T22:40:30,550 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e611192d6313,37867,1731710429220, seqNum=-1] 2024-11-15T22:40:30,551 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T22:40:30,552 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43990, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T22:40:30,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e611192d6313,34029,1731710428957 2024-11-15T22:40:30,554 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T22:40:30,557 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T22:40:30,557 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T22:40:30,560 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/WALs/test.com,8080,1, archiveDir=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/oldWALs, maxLogs=32 2024-11-15T22:40:30,560 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731710430560 2024-11-15T22:40:30,566 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/WALs/test.com,8080,1/test.com%2C8080%2C1.1731710430560 2024-11-15T22:40:30,567 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34895:34895),(127.0.0.1/127.0.0.1:37693:37693)] 2024-11-15T22:40:30,568 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731710430568 2024-11-15T22:40:30,574 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,574 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,574 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,574 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,574 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,575 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/WALs/test.com,8080,1/test.com%2C8080%2C1.1731710430560 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/WALs/test.com,8080,1/test.com%2C8080%2C1.1731710430568 2024-11-15T22:40:30,576 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34895:34895),(127.0.0.1/127.0.0.1:37693:37693)] 2024-11-15T22:40:30,576 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/WALs/test.com,8080,1/test.com%2C8080%2C1.1731710430560 is not closed yet, will try archiving it next time 2024-11-15T22:40:30,576 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741835_1011 (size=93) 2024-11-15T22:40:30,577 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741835_1011 (size=93) 2024-11-15T22:40:30,577 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,577 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,577 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741836_1012 (size=93) 2024-11-15T22:40:30,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741836_1012 (size=93) 2024-11-15T22:40:30,580 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/WALs/test.com,8080,1/test.com%2C8080%2C1.1731710430560 to hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/oldWALs/test.com%2C8080%2C1.1731710430560 2024-11-15T22:40:30,583 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/oldWALs 2024-11-15T22:40:30,583 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731710430568) 2024-11-15T22:40:30,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T22:40:30,583 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T22:40:30,583 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:40:30,583 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:40:30,583 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:40:30,583 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T22:40:30,584 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T22:40:30,584 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=776596282, stopped=false 2024-11-15T22:40:30,584 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e611192d6313,34029,1731710428957 2024-11-15T22:40:30,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T22:40:30,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T22:40:30,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:30,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:30,601 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T22:40:30,601 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T22:40:30,602 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:40:30,602 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:40:30,602 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:40:30,602 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e611192d6313,37867,1731710429220' ***** 2024-11-15T22:40:30,602 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T22:40:30,602 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T22:40:30,602 INFO [RS:0;e611192d6313:37867 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T22:40:30,602 INFO [RS:0;e611192d6313:37867 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T22:40:30,602 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T22:40:30,602 INFO [RS:0;e611192d6313:37867 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T22:40:30,602 INFO [RS:0;e611192d6313:37867 {}] regionserver.HRegionServer(959): stopping server e611192d6313,37867,1731710429220 2024-11-15T22:40:30,602 INFO [RS:0;e611192d6313:37867 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T22:40:30,602 INFO [RS:0;e611192d6313:37867 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e611192d6313:37867. 
2024-11-15T22:40:30,602 DEBUG [RS:0;e611192d6313:37867 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T22:40:30,602 DEBUG [RS:0;e611192d6313:37867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:40:30,602 INFO [RS:0;e611192d6313:37867 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T22:40:30,602 INFO [RS:0;e611192d6313:37867 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T22:40:30,602 INFO [RS:0;e611192d6313:37867 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-15T22:40:30,603 INFO [RS:0;e611192d6313:37867 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T22:40:30,603 INFO [RS:0;e611192d6313:37867 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-15T22:40:30,603 DEBUG [RS:0;e611192d6313:37867 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-15T22:40:30,603 DEBUG [RS:0;e611192d6313:37867 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-15T22:40:30,603 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T22:40:30,603 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T22:40:30,603 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T22:40:30,603 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T22:40:30,603 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T22:40:30,603 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-15T22:40:30,622 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/data/hbase/meta/1588230740/.tmp/ns/4d38877938a647709493fc5abbb7fab4 is 43, key is default/ns:d/1731710430448/Put/seqid=0 2024-11-15T22:40:30,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741837_1013 (size=5153) 2024-11-15T22:40:30,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741837_1013 (size=5153) 2024-11-15T22:40:30,627 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/data/hbase/meta/1588230740/.tmp/ns/4d38877938a647709493fc5abbb7fab4 2024-11-15T22:40:30,632 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/data/hbase/meta/1588230740/.tmp/ns/4d38877938a647709493fc5abbb7fab4 as hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/data/hbase/meta/1588230740/ns/4d38877938a647709493fc5abbb7fab4 2024-11-15T22:40:30,636 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/data/hbase/meta/1588230740/ns/4d38877938a647709493fc5abbb7fab4, entries=2, sequenceid=6, filesize=5.0 K 2024-11-15T22:40:30,637 INFO 
[RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 34ms, sequenceid=6, compaction requested=false 2024-11-15T22:40:30,637 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T22:40:30,641 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T22:40:30,642 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T22:40:30,642 INFO [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T22:40:30,642 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731710430603Running coprocessor pre-close hooks at 1731710430603Disabling compacts and flushes for region at 1731710430603Disabling writes for close at 1731710430603Obtaining lock to block concurrent updates at 1731710430603Preparing flush snapshotting stores in 1588230740 at 1731710430603Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731710430603Flushing stores of hbase:meta,,1.1588230740 at 1731710430604 (+1 ms)Flushing 1588230740/ns: creating writer at 1731710430604Flushing 1588230740/ns: appending metadata at 1731710430621 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731710430621Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c632b98: reopening flushed file at 1731710430631 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 34ms, sequenceid=6, compaction requested=false at 1731710430637 (+6 ms)Writing region close event to WAL at 1731710430638 (+1 ms)Running coprocessor post-close hooks at 1731710430642 (+4 ms)Closed at 1731710430642 2024-11-15T22:40:30,642 DEBUG [RS_CLOSE_META-regionserver/e611192d6313:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T22:40:30,796 INFO [regionserver/e611192d6313:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T22:40:30,796 INFO [regionserver/e611192d6313:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T22:40:30,803 INFO [RS:0;e611192d6313:37867 {}] regionserver.HRegionServer(976): stopping server e611192d6313,37867,1731710429220; all regions closed. 
2024-11-15T22:40:30,804 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,804 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,804 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,804 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,804 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741834_1010 (size=1152) 2024-11-15T22:40:30,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741834_1010 (size=1152) 2024-11-15T22:40:30,811 DEBUG [RS:0;e611192d6313:37867 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/oldWALs 2024-11-15T22:40:30,811 INFO [RS:0;e611192d6313:37867 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e611192d6313%2C37867%2C1731710429220.meta:.meta(num 1731710430369) 2024-11-15T22:40:30,811 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,812 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,812 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,812 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,812 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741833_1009 (size=93) 2024-11-15T22:40:30,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741833_1009 (size=93) 2024-11-15T22:40:30,817 DEBUG [RS:0;e611192d6313:37867 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/oldWALs 2024-11-15T22:40:30,817 INFO [RS:0;e611192d6313:37867 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e611192d6313%2C37867%2C1731710429220:(num 1731710429929) 2024-11-15T22:40:30,818 DEBUG [RS:0;e611192d6313:37867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T22:40:30,818 INFO [RS:0;e611192d6313:37867 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T22:40:30,818 INFO [RS:0;e611192d6313:37867 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T22:40:30,818 INFO [RS:0;e611192d6313:37867 {}] hbase.ChoreService(370): Chore service for: regionserver/e611192d6313:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T22:40:30,818 INFO [RS:0;e611192d6313:37867 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T22:40:30,818 INFO [regionserver/e611192d6313:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T22:40:30,818 INFO [RS:0;e611192d6313:37867 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37867 2024-11-15T22:40:30,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e611192d6313,37867,1731710429220 2024-11-15T22:40:30,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T22:40:30,829 INFO [RS:0;e611192d6313:37867 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T22:40:30,839 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e611192d6313,37867,1731710429220] 2024-11-15T22:40:30,849 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e611192d6313,37867,1731710429220 already deleted, retry=false 2024-11-15T22:40:30,849 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e611192d6313,37867,1731710429220 expired; onlineServers=0 2024-11-15T22:40:30,849 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e611192d6313,34029,1731710428957' ***** 2024-11-15T22:40:30,849 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T22:40:30,849 INFO [M:0;e611192d6313:34029 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T22:40:30,850 INFO [M:0;e611192d6313:34029 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T22:40:30,850 DEBUG [M:0;e611192d6313:34029 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T22:40:30,850 DEBUG [M:0;e611192d6313:34029 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T22:40:30,850 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T22:40:30,850 DEBUG [master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710429658 {}] cleaner.HFileCleaner(306): Exit Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.large.0-1731710429658,5,FailOnTimeoutGroup] 2024-11-15T22:40:30,850 DEBUG [master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710429658 {}] cleaner.HFileCleaner(306): Exit Thread[master/e611192d6313:0:becomeActiveMaster-HFileCleaner.small.0-1731710429658,5,FailOnTimeoutGroup] 2024-11-15T22:40:30,850 INFO [M:0;e611192d6313:34029 {}] hbase.ChoreService(370): Chore service for: master/e611192d6313:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T22:40:30,850 INFO [M:0;e611192d6313:34029 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T22:40:30,850 DEBUG [M:0;e611192d6313:34029 {}] master.HMaster(1795): Stopping service threads 2024-11-15T22:40:30,850 INFO [M:0;e611192d6313:34029 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T22:40:30,850 INFO [M:0;e611192d6313:34029 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T22:40:30,850 INFO [M:0;e611192d6313:34029 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T22:40:30,850 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T22:40:30,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T22:40:30,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T22:40:30,860 DEBUG [M:0;e611192d6313:34029 {}] zookeeper.ZKUtil(347): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T22:40:30,860 WARN [M:0;e611192d6313:34029 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T22:40:30,861 INFO [M:0;e611192d6313:34029 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/.lastflushedseqids 2024-11-15T22:40:30,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741838_1014 (size=99) 2024-11-15T22:40:30,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741838_1014 (size=99) 2024-11-15T22:40:30,867 INFO [M:0;e611192d6313:34029 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T22:40:30,868 INFO [M:0;e611192d6313:34029 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T22:40:30,868 DEBUG [M:0;e611192d6313:34029 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T22:40:30,868 INFO [M:0;e611192d6313:34029 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:40:30,868 DEBUG [M:0;e611192d6313:34029 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:40:30,868 DEBUG [M:0;e611192d6313:34029 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T22:40:30,868 DEBUG [M:0;e611192d6313:34029 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:40:30,868 INFO [M:0;e611192d6313:34029 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-15T22:40:30,886 DEBUG [M:0;e611192d6313:34029 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/56dcea098add41f59b116de876d922a9 is 82, key is hbase:meta,,1/info:regioninfo/1731710430394/Put/seqid=0 2024-11-15T22:40:30,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741839_1015 (size=5672) 2024-11-15T22:40:30,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741839_1015 (size=5672) 2024-11-15T22:40:30,890 INFO [M:0;e611192d6313:34029 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/56dcea098add41f59b116de876d922a9 2024-11-15T22:40:30,907 DEBUG [M:0;e611192d6313:34029 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5434c60a33da4bb1b564afebd8bb4b52 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731710430454/Put/seqid=0 2024-11-15T22:40:30,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741840_1016 (size=5275) 2024-11-15T22:40:30,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741840_1016 (size=5275) 2024-11-15T22:40:30,911 INFO [M:0;e611192d6313:34029 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5434c60a33da4bb1b564afebd8bb4b52 2024-11-15T22:40:30,926 DEBUG [M:0;e611192d6313:34029 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d23e2f1a3cb5403a9454f24977ac462a is 69, key is e611192d6313,37867,1731710429220/rs:state/1731710429776/Put/seqid=0 2024-11-15T22:40:30,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741841_1017 (size=5156) 2024-11-15T22:40:30,930 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741841_1017 (size=5156) 2024-11-15T22:40:30,931 INFO [M:0;e611192d6313:34029 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d23e2f1a3cb5403a9454f24977ac462a 2024-11-15T22:40:30,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:40:30,939 INFO [RS:0;e611192d6313:37867 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T22:40:30,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x10140a822ba0001, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:40:30,939 INFO [RS:0;e611192d6313:37867 {}] regionserver.HRegionServer(1031): Exiting; stopping=e611192d6313,37867,1731710429220; zookeeper connection closed. 2024-11-15T22:40:30,939 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@50f5155e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@50f5155e 2024-11-15T22:40:30,940 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T22:40:30,947 DEBUG [M:0;e611192d6313:34029 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0c8b617f10b041a7967b21937d1ec54e is 52, key is load_balancer_on/state:d/1731710430556/Put/seqid=0 2024-11-15T22:40:30,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741842_1018 (size=5056) 2024-11-15T22:40:30,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741842_1018 (size=5056) 2024-11-15T22:40:30,951 INFO [M:0;e611192d6313:34029 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0c8b617f10b041a7967b21937d1ec54e 2024-11-15T22:40:30,955 DEBUG [M:0;e611192d6313:34029 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/56dcea098add41f59b116de876d922a9 as hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/56dcea098add41f59b116de876d922a9 2024-11-15T22:40:30,959 INFO [M:0;e611192d6313:34029 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/56dcea098add41f59b116de876d922a9, entries=8, sequenceid=29, filesize=5.5 K 2024-11-15T22:40:30,960 DEBUG [M:0;e611192d6313:34029 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5434c60a33da4bb1b564afebd8bb4b52 as hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5434c60a33da4bb1b564afebd8bb4b52 2024-11-15T22:40:30,963 INFO [M:0;e611192d6313:34029 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5434c60a33da4bb1b564afebd8bb4b52, entries=3, sequenceid=29, filesize=5.2 K 2024-11-15T22:40:30,964 DEBUG [M:0;e611192d6313:34029 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d23e2f1a3cb5403a9454f24977ac462a as hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d23e2f1a3cb5403a9454f24977ac462a 2024-11-15T22:40:30,968 INFO [M:0;e611192d6313:34029 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d23e2f1a3cb5403a9454f24977ac462a, entries=1, sequenceid=29, filesize=5.0 K 2024-11-15T22:40:30,969 DEBUG [M:0;e611192d6313:34029 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0c8b617f10b041a7967b21937d1ec54e as hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0c8b617f10b041a7967b21937d1ec54e 2024-11-15T22:40:30,972 INFO [M:0;e611192d6313:34029 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41451/user/jenkins/test-data/1d8151c0-b3e0-292f-6312-b57eed5874ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0c8b617f10b041a7967b21937d1ec54e, entries=1, sequenceid=29, filesize=4.9 K 2024-11-15T22:40:30,973 INFO [M:0;e611192d6313:34029 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 105ms, sequenceid=29, compaction requested=false 2024-11-15T22:40:30,974 INFO [M:0;e611192d6313:34029 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T22:40:30,974 DEBUG [M:0;e611192d6313:34029 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731710430868Disabling compacts and flushes for region at 1731710430868Disabling writes for close at 1731710430868Obtaining lock to block concurrent updates at 1731710430868Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731710430868Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731710430868Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731710430869 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731710430869Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731710430885 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731710430885Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731710430894 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731710430906 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731710430906Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731710430914 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731710430926 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731710430926Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731710430934 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731710430946 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731710430946Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c12a422: reopening flushed file at 1731710430954 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f1b87d0: reopening flushed file at 1731710430959 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@eff452d: reopening flushed file at 1731710430963 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c2886be: reopening flushed file at 1731710430968 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 105ms, sequenceid=29, compaction requested=false at 1731710430973 (+5 ms)Writing region close event to WAL at 1731710430974 (+1 ms)Closed at 1731710430974 2024-11-15T22:40:30,975 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,975 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,975 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,975 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,975 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T22:40:30,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40007 is added to blk_1073741830_1006 (size=10311) 2024-11-15T22:40:30,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39019 is added to blk_1073741830_1006 (size=10311) 2024-11-15T22:40:30,977 INFO [M:0;e611192d6313:34029 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T22:40:30,977 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T22:40:30,977 INFO [M:0;e611192d6313:34029 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34029 2024-11-15T22:40:30,977 INFO [M:0;e611192d6313:34029 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T22:40:31,086 INFO [M:0;e611192d6313:34029 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T22:40:31,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:40:31,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34029-0x10140a822ba0000, quorum=127.0.0.1:51445, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T22:40:31,125 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7966f06a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:40:31,125 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4c6abd2c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:40:31,125 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:40:31,125 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34604a81{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:40:31,125 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9279e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/hadoop.log.dir/,STOPPED} 2024-11-15T22:40:31,126 WARN [BP-197085225-172.17.0.3-1731710426813 heartbeating to localhost/127.0.0.1:41451 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:40:31,126 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T22:40:31,126 WARN [BP-197085225-172.17.0.3-1731710426813 heartbeating to localhost/127.0.0.1:41451 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-197085225-172.17.0.3-1731710426813 (Datanode Uuid 93025969-7f44-490a-8e06-4c933f1de0b1) service to localhost/127.0.0.1:41451 2024-11-15T22:40:31,126 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:40:31,127 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/cluster_2e2cd3ae-1b73-fbc0-9910-a5385aa1f816/data/data3/current/BP-197085225-172.17.0.3-1731710426813 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:40:31,127 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/cluster_2e2cd3ae-1b73-fbc0-9910-a5385aa1f816/data/data4/current/BP-197085225-172.17.0.3-1731710426813 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:40:31,127 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:40:31,129 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@627a202d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T22:40:31,129 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22ed154c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:40:31,129 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:40:31,129 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6bfe0bbd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:40:31,129 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49e6dd92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/hadoop.log.dir/,STOPPED} 2024-11-15T22:40:31,130 WARN [BP-197085225-172.17.0.3-1731710426813 heartbeating to localhost/127.0.0.1:41451 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T22:40:31,130 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T22:40:31,130 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T22:40:31,130 WARN [BP-197085225-172.17.0.3-1731710426813 heartbeating to localhost/127.0.0.1:41451 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-197085225-172.17.0.3-1731710426813 (Datanode Uuid 08c7a6d9-8a7c-4faa-bd17-4abb77cf7c6f) service to localhost/127.0.0.1:41451 2024-11-15T22:40:31,131 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/cluster_2e2cd3ae-1b73-fbc0-9910-a5385aa1f816/data/data1/current/BP-197085225-172.17.0.3-1731710426813 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:40:31,131 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/cluster_2e2cd3ae-1b73-fbc0-9910-a5385aa1f816/data/data2/current/BP-197085225-172.17.0.3-1731710426813 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T22:40:31,131 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T22:40:31,136 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7216654a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T22:40:31,136 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ec9da7e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T22:40:31,136 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T22:40:31,136 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23266789{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T22:40:31,137 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e2fef96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e81d9d53-ec87-34b7-6279-06b31fcc092c/hadoop.log.dir/,STOPPED} 2024-11-15T22:40:31,141 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T22:40:31,155 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T22:40:31,162 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=271 (was 232) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41451 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41451 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41451 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41451 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41451 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:41451 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins@localhost:41451 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41451 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=534 (was 518) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=68 (was 68), ProcessCount=11 (was 11), AvailableMemoryMB=3860 (was 3873)